Compare commits
81 commits: `chore/form` ... `next`

SHA1:
3c93c81204, 6767ca8bc8, f8d7ef04e6, 892fb8846a, bca8d1f70f, 65fe6b0ab5,
fea85b0894, a7405cddc0, 3df21e8257, e4d6202840, c4810a3a08, 73d0536cd3,
a6797ca0a2, cdd03dfec0, 2bab8869d0, cbd3b07ca7, 27d6d94355, a3716a7d5a,
a9c3867287, 423b0928d5, 44dd21f432, 75a0f68349, 8abab8c8a0, 324e1beabf,
00c9ef7b56, 6455e918be, ea3e7045b4, b8a1b4fee5, d95345377b, 75322af8c7,
11187b3fad, 35ed731a46, 1f313c6807, e70d27af98, ba8429cafe, 7a4d0f6fe8,
2f45a907f9, de0deda179, 62f1da053f, 602c56cae9, 4b9520b5ad, 9014e43ce1,
ffc57f8997, fd19dda5cb, dc0fa09a57, ba1138aaa3, 6398136163, 16af8b58ae,
7a5b893013, c453d45598, 144d548ef7, 7b259272ce, 48c1f3bdba, dd19877528,
ba2a5a6115, a36ccff06a, 39b4932725, c45e52f45a, 1dbb3433e0, efecb78888,
f25a0b49eb, b46000fadc, 7b19618136, 19154a9f70, ec8dfc283c, be1b8b68a7,
6c2eb4c786, 3df791e030, 9374b74e77, c732c7c97f, 33c9da75ec, 59d7674b2a,
6bcc2f80b8, 817f382c5f, a888c7cb16, 47aadcea1d, 9b8ec21e6e, e51f60e437,
11990e7524, 8876d54d78, d8badaf64b
69 changed files with 3766 additions and 2178 deletions
@@ -103,6 +103,11 @@ artifacts:
     - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
     - cp result/bin/conduit aarch64-unknown-linux-musl

+    - mkdir -p target/aarch64-unknown-linux-musl/release
+    - cp result/bin/conduit target/aarch64-unknown-linux-musl/release
+    - direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl
+    - mv target/aarch64-unknown-linux-musl/debian/*.deb aarch64-unknown-linux-musl.deb
+
     - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
     - cp result oci-image-arm64v8.tar.gz

@@ -114,6 +119,7 @@ artifacts:
     - x86_64-unknown-linux-musl
     - aarch64-unknown-linux-musl
     - x86_64-unknown-linux-musl.deb
+    - aarch64-unknown-linux-musl.deb
     - oci-image-amd64.tar.gz
     - oci-image-arm64v8.tar.gz
     - public
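The new CI steps stage the Nix-built static binary where cargo-deb expects it and package it without rebuilding. Roughly the same sequence can be run by hand from the repository's dev shell; this is an illustrative sketch following the CI commands above, not an officially documented recipe:

```sh
# Build the static aarch64 binary via Nix and stage it for cargo-deb.
./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
mkdir -p target/aarch64-unknown-linux-musl/release
cp result/bin/conduit target/aarch64-unknown-linux-musl/release
# --no-build: the binary already exists; --no-strip: keep symbols in the package.
direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl
ls target/aarch64-unknown-linux-musl/debian/*.deb
```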
Cargo.lock (generated), 1347 changes — file diff suppressed because it is too large.
Cargo.toml, 73 changes

@@ -16,10 +16,10 @@ license = "Apache-2.0"
 name = "conduit"
 readme = "README.md"
 repository = "https://gitlab.com/famedly/conduit"
-version = "0.8.0-alpha"
+version = "0.10.0-alpha"

 # See also `rust-toolchain.toml`
-rust-version = "1.78.0"
+rust-version = "1.79.0"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

@@ -28,42 +28,24 @@ workspace = true

 [dependencies]
 # Web framework
-axum = { version = "0.6.18", default-features = false, features = [
+axum = { version = "0.7", default-features = false, features = [
     "form",
-    "headers",
     "http1",
     "http2",
     "json",
     "matched-path",
 ], optional = true }
-axum-server = { version = "0.5.1", features = ["tls-rustls"] }
+axum-extra = { version = "0.9", features = ["typed-header"] }
+axum-server = { version = "0.6", features = ["tls-rustls"] }
 tower = { version = "0.4.13", features = ["util"] }
-tower-http = { version = "0.4.1", features = [
+tower-http = { version = "0.5", features = [
     "add-extension",
     "cors",
     "sensitive-headers",
     "trace",
     "util",
 ] }
+tower-service = "0.3"
-# Used for matrix spec type definitions and helpers
-#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
-ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = [
-    "appservice-api-c",
-    "client-api",
-    "compat",
-    "federation-api",
-    "push-gateway-api-c",
-    "rand",
-    "ring-compat",
-    "state-res",
-    "unstable-exhaustive-types",
-    "unstable-msc2448",
-    "unstable-msc3575",
-    "unstable-unspecified",
-] }
-#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
-#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }

 # Async runtime and utilities
 tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }
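Dropping axum's `headers` feature means typed header extractors now come from axum-extra's `typed-header` feature. A minimal sketch of the migration in handler code, assuming a handler that reads `User-Agent` — the handler and route are illustrative, not taken from Conduit:

```rust
use axum::{routing::get, Router};
// With axum 0.6 this extractor was `axum::TypedHeader` behind the "headers" feature;
// with axum 0.7 it lives in axum-extra behind "typed-header".
use axum_extra::{headers::UserAgent, TypedHeader};

async fn whoami(TypedHeader(user_agent): TypedHeader<UserAgent>) -> String {
    format!("you sent User-Agent: {}", user_agent.as_str())
}

fn app() -> Router {
    Router::new().route("/whoami", get(whoami))
}
```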
@@ -74,7 +56,7 @@ persy = { version = "1.4.4", optional = true, features = ["background_ops"] }

 # Used for the http request / response body type for Ruma endpoints used with reqwest
 bytes = "1.4.0"
-http = "0.2.9"
+http = "1"
 # Used to find data directory for default db path
 directories = "5"
 # Used for ruma wrapper

@@ -88,8 +70,14 @@ rand = "0.8.5"
 # Used to hash passwords
 rust-argon2 = "2"
 # Used to send requests
-hyper = "0.14.26"
-reqwest = { version = "0.11.18", default-features = false, features = [
+hyper = "1.1"
+hyper-util = { version = "0.1", features = [
+    "client",
+    "client-legacy",
+    "http1",
+    "http2",
+] }
+reqwest = { version = "0.12", default-features = false, features = [
     "rustls-tls-native-roots",
     "socks",
 ] }
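The reqwest 0.11 → 0.12 bump (together with `http` 0.2 → 1) is largely transparent to calling code; the client API keeps the same shape. A minimal sketch of an outbound request with reqwest 0.12 — the URL is a placeholder, not something Conduit itself requests:

```rust
// Minimal reqwest 0.12 usage; needs tokio with "macros" and "rt-multi-thread".
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let client = reqwest::Client::builder()
        .user_agent("conduit-example")
        .build()?;

    let resp = client
        .get("https://matrix.org/_matrix/federation/v1/version")
        .send()
        .await?;

    println!("status: {}", resp.status());
    println!("body: {}", resp.text().await?);
    Ok(())
}
```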
@@ -112,11 +100,13 @@ regex = "1.8.1"
 # jwt jsonwebtokens
 jsonwebtoken = "9.2.0"
 # Performance measurements
-opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
-opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
-tracing = { version = "0.1.37", features = [] }
+opentelemetry = "0.22"
+opentelemetry-jaeger-propagator = "0.1"
+opentelemetry-otlp = "0.15"
+opentelemetry_sdk = { version = "0.22", features = ["rt-tokio"] }
+tracing = "0.1.37"
 tracing-flame = "0.2.0"
-tracing-opentelemetry = "0.18.0"
+tracing-opentelemetry = "0.23"
 tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }

 lru-cache = "0.1.2"
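The dedicated Jaeger exporter is gone: spans now leave over OTLP while the Jaeger propagator only handles trace-context headers, and the runtime pieces move to `opentelemetry_sdk`. A rough sketch of how these crate versions typically wire into `tracing`, assuming an OTLP collector on the default local gRPC port — a generic example, not Conduit's actual initialisation code:

```rust
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

fn init_tracing() -> Result<(), Box<dyn std::error::Error>> {
    // Keep propagating trace context in the Jaeger header format.
    opentelemetry::global::set_text_map_propagator(
        opentelemetry_jaeger_propagator::Propagator::new(),
    );

    // Export spans over OTLP/gRPC (e.g. to a collector on localhost:4317).
    let tracer = opentelemetry_otlp::new_pipeline()
        .tracing()
        .with_exporter(opentelemetry_otlp::new_exporter().tonic())
        .install_batch(opentelemetry_sdk::runtime::Tokio)?;

    tracing_subscriber::registry()
        .with(tracing_subscriber::EnvFilter::from_default_env())
        .with(tracing_opentelemetry::layer().with_tracer(tracer))
        .init();

    Ok(())
}
```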
@@ -157,6 +147,25 @@ tikv-jemallocator = { version = "0.5.0", features = [

 sd-notify = { version = "0.4.1", optional = true }

+# Used for matrix spec type definitions and helpers
+[dependencies.ruma]
+features = [
+    "appservice-api-c",
+    "client-api",
+    "compat",
+    "federation-api",
+    "push-gateway-api-c",
+    "rand",
+    "ring-compat",
+    "server-util",
+    "state-res",
+    "unstable-exhaustive-types",
+    "unstable-msc2448",
+    "unstable-msc3575",
+    "unstable-unspecified",
+]
+git = "https://github.com/ruma/ruma"
+
 [dependencies.rocksdb]
 features = ["lz4", "multi-threaded-cf", "zstd"]
 optional = true
@@ -56,6 +56,13 @@ If you have any questions, feel free to
 - Send an direct message to `@timokoesters:fachschaften.org` on Matrix
 - [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)

+#### Security
+
+If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs)
+and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to
+[conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before
+a fix is released publically.
+
 #### Thanks to

 Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
@@ -1,4 +1,4 @@
-FROM rust:1.78.0
+FROM rust:1.79.0

 WORKDIR /workdir
default.nix, 20 changes

@@ -1,14 +1,10 @@
Formatting only — the flake-compat shim keeps the same behaviour and now reads:

(import
  (
    let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
    fetchTarball {
      url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
      sha256 = lock.nodes.flake-compat.locked.narHash;
    }
  )
  { src = ./.; }
).defaultNix
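Because default.nix only re-exports the flake through flake-compat, classic Nix commands keep working for users without flakes enabled. Assuming flake-compat exposes the outputs under their usual attribute paths, something like the following should build the server (illustrative, not from the repository docs):

```sh
# Classic Nix, via the flake-compat shim in default.nix
nix-build -A packages.x86_64-linux.default

# Equivalent with flakes enabled
nix build .#default
```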
@@ -6,6 +6,8 @@

 > **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect

+> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. Example: `CONDUIT_SERVER_NAME="example.org"`
+
 Conduit's configuration file is divided into the following sections:

 - [Global](#global)

@@ -56,7 +58,8 @@ The `global` section contains the following fields:
 | `turn_secret` | `string` | The TURN secret | `""` |
 | `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` |
 | `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A |
-| `well_known` | `table` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
+| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
+| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |

 ### TLS
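The new note means any configuration field can come from the environment instead of conduit.toml. An illustrative example for a shell- or unit-launched instance — `CONDUIT_SERVER_NAME` and `CONDUIT_PORT` appear in the docs, while the well-known variable name is derived from the field name and is an assumption:

```sh
# Equivalent to setting these fields in the [global] section of conduit.toml.
export CONDUIT_SERVER_NAME="example.org"
export CONDUIT_PORT="6167"
# Derived from the `well_known_client` field introduced above (assumption).
export CONDUIT_WELL_KNOWN_CLIENT="https://matrix.example.org"
./conduit
```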
@@ -16,18 +16,18 @@ are connected to the server running Conduit using something like a VPN.

 > **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration

-To configure it, use the following options in the `global.well_known` table:
+To configure it, use the following options:

 | Field | Type | Description | Default |
 | --- | --- | --- | --- |
-| `client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
-| `server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |
+| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
+| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |

 ### Example

 ```toml
-[global.well_known]
-client = "https://matrix.example.org"
-server = "matrix.example.org:443"
+[global]
+well_known_client = "https://matrix.example.org"
+well_known_server = "matrix.example.org:443"
 ```

 ## Manual
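Once delegation is configured, the values are served at the standard Matrix well-known endpoints, which is an easy way to verify the setup from outside (host names are placeholders):

```sh
# Client delegation: the base URL clients should use.
curl -s https://example.org/.well-known/matrix/client
# {"m.homeserver": {"base_url": "https://matrix.example.org"}}

# Server delegation: the host:port other homeservers should use.
curl -s https://example.org/.well-known/matrix/server
# {"m.server": "matrix.example.org:443"}
```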
@@ -64,6 +64,7 @@ docker run -d -p 8448:6167 \
     -e CONDUIT_MAX_REQUEST_SIZE="20000000" \
     -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
     -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
+    -e CONDUIT_PORT="6167" \
     --name conduit <link>
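The same variables translate directly to a Compose file; a sketch in which only the environment variables are taken from the docs, while the image reference and everything else is a placeholder:

```yaml
services:
  conduit:
    image: <link>   # same image reference as in the docker run example
    restart: unless-stopped
    ports:
      - "8448:6167"
    environment:
      CONDUIT_SERVER_NAME: "example.org"
      CONDUIT_MAX_REQUEST_SIZE: "20000000"
      CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
      CONDUIT_MAX_CONCURRENT_REQUESTS: "100"
      CONDUIT_PORT: "6167"
```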
@@ -17,6 +17,7 @@ You may simply download the binary that fits your machine. Run `uname -m` to see

 | Target | Type | Download |
 |-|-|-|
 | `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
+| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
 | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) |
 | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) |
 | `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) |

@@ -30,6 +31,7 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to

 | Target | Type | Download |
 |-|-|-|
 | `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
+| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
 | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
 | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
 | `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |
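With the aarch64 Debian package now published, installing on an arm64 machine mirrors the existing x86_64 flow; a short illustrative sequence using the artifact URL from the table above:

```sh
# Download the aarch64 Debian package built from master…
wget -O conduit.deb "https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts"
# …and install it.
sudo dpkg -i conduit.deb
```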
@@ -35,3 +35,7 @@ Here is an example:

 Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically.
 Rooms that were federated can be re-joined via the other participating servers, however media and the like may be deleted from remote servers after some time, and hence might not be recoverable.
+
+## How do I make someone an admin?
+
+Simply invite them to the admin room. Once joined, they can administer the server by interacting with the `@conduit:<server_name>` user.
@@ -2,7 +2,7 @@

 ## General instructions

-* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).
+* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/element-hq/synapse/blob/develop/docs/turn-howto.md).

 ## Edit/Add a few settings to your existing conduit.toml
engage.toml, 10 changes

@@ -35,11 +35,6 @@ group = "versions"
 name = "lychee"
 script = "lychee --version"

-[[task]]
-group = "versions"
-name = "alejandra"
-script = "alejandra --version"
-
 [[task]]
 group = "lints"
 name = "cargo-fmt"

@@ -71,11 +66,6 @@ group = "lints"
 name = "lychee"
 script = "lychee --offline docs"

-[[task]]
-group = "lints"
-name = "alejandra"
-script = "alejandra --check ."
-
 [[task]]
 group = "tests"
 name = "cargo"
flake.nix, 73 changes

@@ -19,10 +19,10 @@, @@ -34,24 +34,24 @@, @@ -87,9 +86,9 @@
These hunks only re-indent the `outputs`, `mkScope`, `rocksdb`, `toolchain`, `eachDefaultSystem` and `packages // builtins.listToAttrs (…)` expressions; their content is unchanged.

@@ -59,27 +59,26 @@
Besides the same re-indentation, the pinned toolchain hash moves with the Rust 1.79.0 bump:

 file = ./rust-toolchain.toml;

 # See also `rust-toolchain.toml`
-sha256 = "sha256-opUgs6ckUQCyDxcB9Wy51pqhd0MPGHUVbwRKKPGiwZU=";
+sha256 = "sha256-Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU=";
 };
@@ -1,29 +1,27 @@ (the book derivation, `./nix/pkgs/book` in flake.nix)
Formatting only: the argument set ({ default, inputs, mdbook, stdenv }:) moves to leading-comma style and the `src = let filter = inputs.nix-filter.lib; in filter { root = inputs.self; include = [ "book.toml" "conduit-example.toml" "debian/README.md" "docs" "README.md" ]; }` block is collapsed onto fewer lines; `pname = "${default.pname}-book"`, `version = default.version` and the mdbook nativeBuildInputs are unchanged.
@@ -1,93 +1,100 @@ (the cross-compilation environment, imported by the package as `./cross-compilation-env.nix`)
Formatting only: the argument set ({ lib, pkgsBuildHost, rust, stdenv }:), the CARGO_BUILD_RUSTFLAGS list (static relocation model, the `-l c` linker flag, and the `-l stdc++ -L ${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib` workaround), and the `//`-merged let…in blocks that set CC_*/CXX_*/CARGO_TARGET_*_LINKER, CARGO_BUILD_TARGET, HOST_CC and HOST_CXX for the host, target and build platforms are re-flowed; no flags, comments or values change.
@@ -1,42 +1,43 @@, @@ -44,22 +45,18 @@, @@ -71,31 +68,28 @@ (the main package derivation)
Formatting only: the argument set ({ craneLib, inputs, lib, pkgsBuildHost, rocksdb, rust, stdenv, default-features ? true, features ? [], profile ? "release" }:), the buildDepsOnlyEnv / buildPackageEnv let-bindings (including the rocksdb jemalloc override and the import of ./cross-compilation-env.nix), the nix-filter `src` block, and the final `craneLib.buildPackage (commonAttrs // { cargoArtifacts, cargoExtraArgs = "--locked" with the optional --no-default-features / --features list, doCheck = false, env = buildPackageEnv, passthru = { env = buildPackageEnv; }, meta.mainProgram })` call are re-flowed; behaviour is unchanged.
@@ -1,10 +1,10 @@ (the OCI image derivation)
Formatting only: the argument set ({ default, dockerTools, lib, tini }:) switches to leading-comma style; `dockerTools.buildImage { name = default.pname; tag = "next"; … }` is unchanged.
nix/shell.nix, 100 changes

@@ -1,69 +1,61 @@
The argument set ({ cargo-deb, default, engage, go, inputs, jq, lychee, mdbook, mkShell, olm, system, taplo, toolchain }:) and the `env` / `nativeBuildInputs` attributes are re-flowed, and the tool list now ends with `] ++ default.nativeBuildInputs;` instead of starting from it. The only tool removed is the alejandra Nix formatter, which disappears from the argument set and from the end of `nativeBuildInputs`:

-      # nix formatter
-      alejandra
@@ -2,7 +2,6 @@
 #
 # Other files that need upkeep when this changes:
 #
-# * `.gitlab-ci.yml`
 # * `Cargo.toml`
 # * `flake.nix`
 #

@@ -10,7 +9,7 @@
 # If you're having trouble making the relevant changes, bug a maintainer.

 [toolchain]
-channel = "1.78.0"
+channel = "1.79.0"
 components = [
     # For rust-analyzer
     "rust-src",
@@ -75,9 +75,9 @@ pub async fn get_register_available_route(
 /// - Creates a new account and populates it with default account data
 /// - If `inhibit_login` is false: Creates a device and returns device id and access_token
 pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
-    if !services().globals.allow_registration() && body.appservice_info.is_none() {
+    if !services().globals.allow_registration().await && body.appservice_info.is_none() {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
             "Registration has been disabled.",
         ));
     }

@@ -315,7 +315,11 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
 pub async fn change_password_route(
     body: Ruma<change_password::v3::Request>,
 ) -> Result<change_password::v3::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+    let sender_user = body
+        .sender_user
+        .as_ref()
+        // In the future password changes could be performed with UIA with 3PIDs, but we don't support that currently
+        .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");

     let mut uiaainfo = UiaaInfo {

@@ -402,7 +406,11 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
 pub async fn deactivate_route(
     body: Ruma<deactivate::v3::Request>,
 ) -> Result<deactivate::v3::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+    let sender_user = body
+        .sender_user
+        .as_ref()
+        // In the future password changes could be performed with UIA with SSO, but we don't support that currently
+        .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");

     let mut uiaainfo = UiaaInfo {

@@ -475,7 +483,7 @@ pub async fn request_3pid_management_token_via_email_route(
 ) -> Result<request_3pid_management_token_via_email::v3::Response> {
     Err(Error::BadRequest(
         ErrorKind::ThreepidDenied,
-        "Third party identifier is not allowed",
+        "Third party identifiers are currently unsupported by this server implementation",
     ))
 }

@@ -489,6 +497,6 @@ pub async fn request_3pid_management_token_via_msisdn_route(
 ) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
     Err(Error::BadRequest(
         ErrorKind::ThreepidDenied,
-        "Third party identifier is not allowed",
+        "Third party identifiers are currently unsupported by this server implementation",
     ))
 }
@@ -18,6 +18,8 @@ use ruma::{
 pub async fn create_alias_route(
     body: Ruma<create_alias::v3::Request>,
 ) -> Result<create_alias::v3::Response> {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
     if body.room_alias.server_name() != services().globals.server_name() {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,

@@ -55,7 +57,7 @@ pub async fn create_alias_route(
     services()
         .rooms
         .alias
-        .set_alias(&body.room_alias, &body.room_id)?;
+        .set_alias(&body.room_alias, &body.room_id, sender_user)?;

     Ok(create_alias::v3::Response::new())
 }

@@ -64,11 +66,12 @@ pub async fn create_alias_route(
 ///
 /// Deletes a room alias from this server.
 ///
-/// - TODO: additional access control checks
 /// - TODO: Update canonical alias event
 pub async fn delete_alias_route(
     body: Ruma<delete_alias::v3::Request>,
 ) -> Result<delete_alias::v3::Response> {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
     if body.room_alias.server_name() != services().globals.server_name() {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,

@@ -94,7 +97,10 @@ pub async fn delete_alias_route(
         ));
     }

-    services().rooms.alias.remove_alias(&body.room_alias)?;
+    services()
+        .rooms
+        .alias
+        .remove_alias(&body.room_alias, sender_user)?;

     // TODO: update alt_aliases?
@@ -54,7 +54,7 @@ pub async fn get_context_route(
         .user_can_see_event(sender_user, &room_id, &body.event_id)?
     {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
             "You don't have permission to view this event.",
         ));
     }
@@ -1,12 +1,24 @@
+// Unauthenticated media is deprecated
+#![allow(deprecated)]
+
 use std::time::Duration;

 use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
-use ruma::api::client::{
-    error::ErrorKind,
-    media::{
-        create_content, get_content, get_content_as_filename, get_content_thumbnail,
-        get_media_config,
+use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE};
+use ruma::{
+    api::{
+        client::{
+            authenticated_media::{
+                get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
+            },
+            error::ErrorKind,
+            media::{self, create_content},
+        },
+        federation::authenticated_media::{self as federation_media, FileOrLocation},
     },
+    http_headers::{ContentDisposition, ContentDispositionType},
+    media::Method,
+    ServerName, UInt,
 };

 const MXC_LENGTH: usize = 32;

@@ -15,19 +27,22 @@ const MXC_LENGTH: usize = 32;
 ///
 /// Returns max upload size.
 pub async fn get_media_config_route(
-    _body: Ruma<get_media_config::v3::Request>,
-) -> Result<get_media_config::v3::Response> {
-    Ok(get_media_config::v3::Response {
+    _body: Ruma<media::get_media_config::v3::Request>,
+) -> Result<media::get_media_config::v3::Response> {
+    Ok(media::get_media_config::v3::Response {
         upload_size: services().globals.max_request_size().into(),
     })
 }

-fn sanitize_content_type(content_type: String) -> String {
-    if content_type == "image/jpeg" || content_type == "image/png" {
-        content_type
-    } else {
-        "application/octet-stream".to_owned()
-    }
+/// # `GET /_matrix/client/v1/media/config`
+///
+/// Returns max upload size.
+pub async fn get_media_config_auth_route(
+    _body: Ruma<get_media_config::v1::Request>,
+) -> Result<get_media_config::v1::Response> {
+    Ok(get_media_config::v1::Response {
+        upload_size: services().globals.max_request_size().into(),
+    })
 }

 /// # `POST /_matrix/media/r0/upload`
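The new `*_auth_route` handlers back the authenticated media endpoints named in their doc comments, where a client access token is required instead of the media being world-readable. A quick illustrative check against a running server (host name and token are placeholders):

```sh
# Authenticated media config (new endpoint)
curl -s -H "Authorization: Bearer <access_token>" \
  "https://matrix.example.org/_matrix/client/v1/media/config"

# Authenticated download, replacing the deprecated unauthenticated form
curl -s -H "Authorization: Bearer <access_token>" -o media.bin \
  "https://matrix.example.org/_matrix/client/v1/media/download/example.org/<media_id>"
```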
@ -49,10 +64,10 @@ pub async fn create_content_route(
|
||||||
.media
|
.media
|
||||||
.create(
|
.create(
|
||||||
mxc.clone(),
|
mxc.clone(),
|
||||||
body.filename
|
Some(
|
||||||
.as_ref()
|
ContentDisposition::new(ContentDispositionType::Inline)
|
||||||
.map(|filename| "inline; filename=".to_owned() + filename)
|
.with_filename(body.filename.clone()),
|
||||||
.as_deref(),
|
),
|
||||||
body.content_type.as_deref(),
|
body.content_type.as_deref(),
|
||||||
&body.file,
|
&body.file,
|
||||||
)
|
)
|
||||||
|
@ -66,28 +81,67 @@ pub async fn create_content_route(
|
||||||
|
|
||||||
pub async fn get_remote_content(
|
pub async fn get_remote_content(
|
||||||
mxc: &str,
|
mxc: &str,
|
||||||
server_name: &ruma::ServerName,
|
server_name: &ServerName,
|
||||||
media_id: String,
|
media_id: String,
|
||||||
) -> Result<get_content::v3::Response, Error> {
|
) -> Result<get_content::v1::Response, Error> {
|
||||||
let content_response = services()
|
let content_response = match services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
server_name,
|
server_name,
|
||||||
get_content::v3::Request {
|
federation_media::get_content::v1::Request {
|
||||||
allow_remote: false,
|
media_id: media_id.clone(),
|
||||||
server_name: server_name.to_owned(),
|
|
||||||
media_id,
|
|
||||||
timeout_ms: Duration::from_secs(20),
|
timeout_ms: Duration::from_secs(20),
|
||||||
allow_redirect: false,
|
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await
|
||||||
|
{
|
||||||
|
Ok(federation_media::get_content::v1::Response {
|
||||||
|
metadata: _,
|
||||||
|
content: FileOrLocation::File(content),
|
||||||
|
}) => get_content::v1::Response {
|
||||||
|
file: content.file,
|
||||||
|
content_type: content.content_type,
|
||||||
|
content_disposition: content.content_disposition,
|
||||||
|
},
|
||||||
|
|
||||||
|
Ok(federation_media::get_content::v1::Response {
|
||||||
|
metadata: _,
|
||||||
|
content: FileOrLocation::Location(url),
|
||||||
|
}) => get_location_content(url).await?,
|
||||||
|
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
|
||||||
|
let media::get_content::v3::Response {
|
||||||
|
file,
|
||||||
|
content_type,
|
||||||
|
content_disposition,
|
||||||
|
..
|
||||||
|
} = services()
|
||||||
|
.sending
|
||||||
|
.send_federation_request(
|
||||||
|
server_name,
|
||||||
|
media::get_content::v3::Request {
|
||||||
|
server_name: server_name.to_owned(),
|
||||||
|
media_id,
|
||||||
|
timeout_ms: Duration::from_secs(20),
|
||||||
|
allow_remote: false,
|
||||||
|
allow_redirect: true,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
get_content::v1::Response {
|
||||||
|
file,
|
||||||
|
content_type,
|
||||||
|
content_disposition,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
services()
|
services()
|
||||||
.media
|
.media
|
||||||
.create(
|
.create(
|
||||||
mxc.to_owned(),
|
mxc.to_owned(),
|
||||||
content_response.content_disposition.as_deref(),
|
content_response.content_disposition.clone(),
|
||||||
content_response.content_type.as_deref(),
|
content_response.content_type.as_deref(),
|
||||||
&content_response.file,
|
&content_response.file,
|
||||||
)
|
)
|
||||||
|
@ -102,31 +156,57 @@ pub async fn get_remote_content(
 ///
 /// - Only allows federation if `allow_remote` is true
 pub async fn get_content_route(
-    body: Ruma<get_content::v3::Request>,
-) -> Result<get_content::v3::Response> {
-    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
-
-    if let Some(FileMeta {
-        content_disposition,
-        file,
-        ..
-    }) = services().media.get(mxc.clone()).await?
-    {
-        Ok(get_content::v3::Response {
-            file,
-            content_type: Some("application/octet-stream".to_owned()),
-            content_disposition,
-            cross_origin_resource_policy: Some("cross-origin".to_owned()),
-        })
-    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
-        let remote_content_response =
-            get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
-
-        Ok(get_content::v3::Response {
+    body: Ruma<media::get_content::v3::Request>,
+) -> Result<media::get_content::v3::Response> {
+    let get_content::v1::Response {
+        file,
+        content_disposition,
+        content_type,
+    } = get_content(&body.server_name, body.media_id.clone(), body.allow_remote).await?;
+
+    Ok(media::get_content::v3::Response {
+        file,
+        content_type,
+        content_disposition,
+        cross_origin_resource_policy: Some("cross-origin".to_owned()),
+    })
+}
+
+/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
+///
+/// Load media from our server or over federation.
+pub async fn get_content_auth_route(
+    body: Ruma<get_content::v1::Request>,
+) -> Result<get_content::v1::Response> {
+    get_content(&body.server_name, body.media_id.clone(), true).await
+}
+
+async fn get_content(
+    server_name: &ServerName,
+    media_id: String,
+    allow_remote: bool,
+) -> Result<get_content::v1::Response, Error> {
+    let mxc = format!("mxc://{}/{}", server_name, media_id);
+
+    if let Ok(Some(FileMeta {
+        content_disposition,
+        content_type,
+        file,
+    })) = services().media.get(mxc.clone()).await
+    {
+        Ok(get_content::v1::Response {
+            file,
+            content_type,
+            content_disposition: Some(content_disposition),
+        })
+    } else if server_name != services().globals.server_name() && allow_remote {
+        let remote_content_response =
+            get_remote_content(&mxc, server_name, media_id.clone()).await?;
+
+        Ok(get_content::v1::Response {
             content_disposition: remote_content_response.content_disposition,
-            content_type: Some("application/octet-stream".to_owned()),
+            content_type: remote_content_response.content_type,
             file: remote_content_response.file,
-            cross_origin_resource_policy: Some("cross-origin".to_owned()),
         })
     } else {
         Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@ -139,26 +219,74 @@ pub async fn get_content_route(
 ///
 /// - Only allows federation if `allow_remote` is true
 pub async fn get_content_as_filename_route(
-    body: Ruma<get_content_as_filename::v3::Request>,
-) -> Result<get_content_as_filename::v3::Response> {
-    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
-
-    if let Some(FileMeta { file, .. }) = services().media.get(mxc.clone()).await? {
-        Ok(get_content_as_filename::v3::Response {
+    body: Ruma<media::get_content_as_filename::v3::Request>,
+) -> Result<media::get_content_as_filename::v3::Response> {
+    let get_content_as_filename::v1::Response {
+        file,
+        content_type,
+        content_disposition,
+    } = get_content_as_filename(
+        &body.server_name,
+        body.media_id.clone(),
+        body.filename.clone(),
+        body.allow_remote,
+    )
+    .await?;
+
+    Ok(media::get_content_as_filename::v3::Response {
+        file,
+        content_type,
+        content_disposition,
+        cross_origin_resource_policy: Some("cross-origin".to_owned()),
+    })
+}
+
+/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
+///
+/// Load media from our server or over federation, permitting desired filename.
+pub async fn get_content_as_filename_auth_route(
+    body: Ruma<get_content_as_filename::v1::Request>,
+) -> Result<get_content_as_filename::v1::Response, Error> {
+    get_content_as_filename(
+        &body.server_name,
+        body.media_id.clone(),
+        body.filename.clone(),
+        true,
+    )
+    .await
+}
+
+async fn get_content_as_filename(
+    server_name: &ServerName,
+    media_id: String,
+    filename: String,
+    allow_remote: bool,
+) -> Result<get_content_as_filename::v1::Response, Error> {
+    let mxc = format!("mxc://{}/{}", server_name, media_id);
+
+    if let Ok(Some(FileMeta {
+        file, content_type, ..
+    })) = services().media.get(mxc.clone()).await
+    {
+        Ok(get_content_as_filename::v1::Response {
             file,
-            content_type: Some("application/octet-stream".to_owned()),
-            content_disposition: Some(format!("inline; filename={}", body.filename)),
-            cross_origin_resource_policy: Some("cross-origin".to_owned()),
+            content_type,
+            content_disposition: Some(
+                ContentDisposition::new(ContentDispositionType::Inline)
+                    .with_filename(Some(filename.clone())),
+            ),
         })
-    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
+    } else if server_name != services().globals.server_name() && allow_remote {
         let remote_content_response =
-            get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
+            get_remote_content(&mxc, server_name, media_id.clone()).await?;
 
-        Ok(get_content_as_filename::v3::Response {
-            content_disposition: Some(format!("inline: filename={}", body.filename)),
-            content_type: Some("application/octet-stream".to_owned()),
+        Ok(get_content_as_filename::v1::Response {
+            content_disposition: Some(
+                ContentDisposition::new(ContentDispositionType::Inline)
+                    .with_filename(Some(filename.clone())),
+            ),
+            content_type: remote_content_response.content_type,
             file: remote_content_response.file,
-            cross_origin_resource_policy: Some("cross-origin".to_owned()),
         })
     } else {
         Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@ -171,66 +299,169 @@ pub async fn get_content_as_filename_route(
 ///
 /// - Only allows federation if `allow_remote` is true
 pub async fn get_content_thumbnail_route(
-    body: Ruma<get_content_thumbnail::v3::Request>,
-) -> Result<get_content_thumbnail::v3::Response> {
-    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+    body: Ruma<media::get_content_thumbnail::v3::Request>,
+) -> Result<media::get_content_thumbnail::v3::Response> {
+    let get_content_thumbnail::v1::Response { file, content_type } = get_content_thumbnail(
+        &body.server_name,
+        body.media_id.clone(),
+        body.height,
+        body.width,
+        body.method.clone(),
+        body.animated,
+        body.allow_remote,
+    )
+    .await?;
 
-    if let Some(FileMeta {
+    Ok(media::get_content_thumbnail::v3::Response {
+        file,
+        content_type,
+        cross_origin_resource_policy: Some("cross-origin".to_owned()),
+    })
+}
+
+/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
+///
+/// Load media thumbnail from our server or over federation.
+pub async fn get_content_thumbnail_auth_route(
+    body: Ruma<get_content_thumbnail::v1::Request>,
+) -> Result<get_content_thumbnail::v1::Response> {
+    get_content_thumbnail(
+        &body.server_name,
+        body.media_id.clone(),
+        body.height,
+        body.width,
+        body.method.clone(),
+        body.animated,
+        true,
+    )
+    .await
+}
+
+async fn get_content_thumbnail(
+    server_name: &ServerName,
+    media_id: String,
+    height: UInt,
+    width: UInt,
+    method: Option<Method>,
+    animated: Option<bool>,
+    allow_remote: bool,
+) -> Result<get_content_thumbnail::v1::Response, Error> {
+    let mxc = format!("mxc://{}/{}", server_name, media_id);
+
+    if let Ok(Some(FileMeta {
         file, content_type, ..
-    }) = services()
+    })) = services()
         .media
         .get_thumbnail(
             mxc.clone(),
-            body.width
+            width
                 .try_into()
                 .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
-            body.height
+            height
                 .try_into()
-                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
+                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?,
         )
-        .await?
+        .await
     {
-        Ok(get_content_thumbnail::v3::Response {
-            file,
-            content_type: content_type.map(sanitize_content_type),
-            cross_origin_resource_policy: Some("cross-origin".to_owned()),
-        })
-    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
-        let mut get_thumbnail_response = services()
+        Ok(get_content_thumbnail::v1::Response { file, content_type })
+    } else if server_name != services().globals.server_name() && allow_remote {
+        let thumbnail_response = match services()
             .sending
             .send_federation_request(
-                &body.server_name,
-                get_content_thumbnail::v3::Request {
-                    allow_remote: false,
-                    height: body.height,
-                    width: body.width,
-                    method: body.method.clone(),
-                    server_name: body.server_name.clone(),
-                    media_id: body.media_id.clone(),
+                server_name,
+                federation_media::get_content_thumbnail::v1::Request {
+                    height,
+                    width,
+                    method: method.clone(),
+                    media_id: media_id.clone(),
                     timeout_ms: Duration::from_secs(20),
-                    allow_redirect: false,
+                    animated,
                 },
             )
-            .await?;
+            .await
+        {
+            Ok(federation_media::get_content_thumbnail::v1::Response {
+                metadata: _,
+                content: FileOrLocation::File(content),
+            }) => get_content_thumbnail::v1::Response {
+                file: content.file,
+                content_type: content.content_type,
+            },
+
+            Ok(federation_media::get_content_thumbnail::v1::Response {
+                metadata: _,
+                content: FileOrLocation::Location(url),
+            }) => {
+                let get_content::v1::Response {
+                    file, content_type, ..
+                } = get_location_content(url).await?;
+
+                get_content_thumbnail::v1::Response { file, content_type }
+            }
+            Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
+                let media::get_content_thumbnail::v3::Response {
+                    file, content_type, ..
+                } = services()
+                    .sending
+                    .send_federation_request(
+                        server_name,
+                        media::get_content_thumbnail::v3::Request {
+                            height,
+                            width,
+                            method: method.clone(),
+                            server_name: server_name.to_owned(),
+                            media_id: media_id.clone(),
+                            timeout_ms: Duration::from_secs(20),
+                            allow_redirect: false,
+                            animated,
+                            allow_remote: false,
+                        },
+                    )
+                    .await?;
+
+                get_content_thumbnail::v1::Response { file, content_type }
+            }
+            Err(e) => return Err(e),
+        };
 
         services()
             .media
             .upload_thumbnail(
                 mxc,
-                None,
-                get_thumbnail_response.content_type.as_deref(),
-                body.width.try_into().expect("all UInts are valid u32s"),
-                body.height.try_into().expect("all UInts are valid u32s"),
-                &get_thumbnail_response.file,
+                thumbnail_response.content_type.as_deref(),
+                width.try_into().expect("all UInts are valid u32s"),
+                height.try_into().expect("all UInts are valid u32s"),
+                &thumbnail_response.file,
             )
             .await?;
 
-        get_thumbnail_response.content_type = get_thumbnail_response
-            .content_type
-            .map(sanitize_content_type);
-
-        Ok(get_thumbnail_response)
+        Ok(thumbnail_response)
     } else {
         Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
     }
 }
+
+async fn get_location_content(url: String) -> Result<get_content::v1::Response, Error> {
+    let client = services().globals.default_client();
+    let response = client.get(url).send().await?;
+    let headers = response.headers();
+
+    let content_type = headers
+        .get(CONTENT_TYPE)
+        .and_then(|header| header.to_str().ok())
+        .map(ToOwned::to_owned);
+
+    let content_disposition = headers
+        .get(CONTENT_DISPOSITION)
+        .map(|header| header.as_bytes())
+        .map(TryFrom::try_from)
+        .and_then(Result::ok);
+
+    let file = response.bytes().await?.to_vec();
+
+    Ok(get_content::v1::Response {
+        file,
+        content_type,
+        content_disposition,
+    })
+}
@ -18,9 +18,8 @@ use ruma::{
         },
         StateEventType, TimelineEventType,
     },
-    serde::Base64,
-    state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
-    OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId,
+    state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
+    OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId,
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
 use std::{
@ -32,7 +31,10 @@ use tokio::sync::RwLock;
 use tracing::{debug, error, info, warn};
 
 use crate::{
-    service::pdu::{gen_event_id_canonical_json, PduBuilder},
+    service::{
+        globals::SigningKeys,
+        pdu::{gen_event_id_canonical_json, PduBuilder},
+    },
     services, utils, Error, PduEvent, Result, Ruma,
 };
 
@ -95,7 +97,7 @@ pub async fn join_room_by_id_or_alias_route(
 
     let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) {
         Ok(room_id) => {
-            let mut servers = body.server_name.clone();
+            let mut servers = body.via.clone();
             servers.extend(
                 services()
                     .rooms
@ -186,15 +188,7 @@ pub async fn kick_user_route(
 ) -> Result<kick_user::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    if let Ok(true) = services()
-        .rooms
-        .state_cache
-        .is_left(sender_user, &body.room_id)
-    {
-        return Ok(kick_user::v3::Response {});
-    }
-
-    let mut event: RoomMemberEventContent = serde_json::from_str(
+    let event: RoomMemberEventContent = serde_json::from_str(
         services()
             .rooms
             .state_accessor
@ -205,15 +199,26 @@ pub async fn kick_user_route(
         )?
         .ok_or(Error::BadRequest(
             ErrorKind::BadState,
-            "Cannot kick member that's not in the room.",
+            "Cannot kick a user who is not in the room.",
         ))?
         .content
         .get(),
     )
     .map_err(|_| Error::bad_database("Invalid member event in database."))?;
 
-    event.membership = MembershipState::Leave;
-    event.reason.clone_from(&body.reason);
+    // If they are already kicked and the reason is unchanged, there isn't any point in sending a new event.
+    if event.membership == MembershipState::Leave && event.reason == body.reason {
+        return Ok(kick_user::v3::Response {});
+    }
+
+    let event = RoomMemberEventContent {
+        is_direct: None,
+        membership: MembershipState::Leave,
+        third_party_invite: None,
+        reason: body.reason.clone(),
+        join_authorized_via_users_server: None,
+        ..event
+    };
 
     let mutex_state = Arc::clone(
         services()
@ -236,6 +241,7 @@ pub async fn kick_user_route(
             unsigned: None,
             state_key: Some(body.user_id.to_string()),
             redacts: None,
+            timestamp: None,
         },
         sender_user,
         &body.room_id,
@ -254,17 +260,7 @@ pub async fn kick_user_route(
 pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_user::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    if let Ok(Some(membership_event)) = services()
-        .rooms
-        .state_accessor
-        .get_member(&body.room_id, sender_user)
-    {
-        if membership_event.membership == MembershipState::Ban {
-            return Ok(ban_user::v3::Response {});
-        }
-    }
-
-    let event = services()
+    let event = if let Some(event) = services()
         .rooms
         .state_accessor
         .room_state_get(
@ -272,27 +268,30 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
             &StateEventType::RoomMember,
             body.user_id.as_ref(),
         )?
-        .map_or(
-            Ok(RoomMemberEventContent {
-                membership: MembershipState::Ban,
-                displayname: services().users.displayname(&body.user_id)?,
-                avatar_url: services().users.avatar_url(&body.user_id)?,
-                is_direct: None,
-                third_party_invite: None,
-                blurhash: services().users.blurhash(&body.user_id)?,
-                reason: body.reason.clone(),
-                join_authorized_via_users_server: None,
-            }),
-            |event| {
-                serde_json::from_str(event.content.get())
-                    .map(|event: RoomMemberEventContent| RoomMemberEventContent {
-                        membership: MembershipState::Ban,
-                        join_authorized_via_users_server: None,
-                        ..event
-                    })
-                    .map_err(|_| Error::bad_database("Invalid member event in database."))
-            },
-        )?;
+        // Even when the previous member content is invalid, we should let the ban go through anyways.
+        .and_then(|event| serde_json::from_str::<RoomMemberEventContent>(event.content.get()).ok())
+    {
+        // If they are already banned and the reason is unchanged, there isn't any point in sending a new event.
+        if event.membership == MembershipState::Ban && event.reason == body.reason {
+            return Ok(ban_user::v3::Response {});
+        }
+
+        RoomMemberEventContent {
+            membership: MembershipState::Ban,
+            join_authorized_via_users_server: None,
+            reason: body.reason.clone(),
+            third_party_invite: None,
+            is_direct: None,
+            avatar_url: event.avatar_url,
+            displayname: event.displayname,
+            blurhash: event.blurhash,
+        }
+    } else {
+        RoomMemberEventContent {
+            reason: body.reason.clone(),
+            ..RoomMemberEventContent::new(MembershipState::Ban)
+        }
+    };
 
     let mutex_state = Arc::clone(
         services()
@ -315,6 +314,7 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
             unsigned: None,
             state_key: Some(body.user_id.to_string()),
             redacts: None,
+            timestamp: None,
         },
         sender_user,
         &body.room_id,
@ -335,17 +335,7 @@ pub async fn unban_user_route(
 ) -> Result<unban_user::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    if let Ok(Some(membership_event)) = services()
-        .rooms
-        .state_accessor
-        .get_member(&body.room_id, sender_user)
-    {
-        if membership_event.membership != MembershipState::Ban {
-            return Ok(unban_user::v3::Response {});
-        }
-    }
-
-    let mut event: RoomMemberEventContent = serde_json::from_str(
+    let event: RoomMemberEventContent = serde_json::from_str(
         services()
             .rooms
             .state_accessor
@ -363,8 +353,19 @@ pub async fn unban_user_route(
     )
     .map_err(|_| Error::bad_database("Invalid member event in database."))?;
 
-    event.membership = MembershipState::Leave;
-    event.reason.clone_from(&body.reason);
+    // If they are already unbanned and the reason is unchanged, there isn't any point in sending a new event.
+    if event.membership == MembershipState::Leave && event.reason == body.reason {
+        return Ok(unban_user::v3::Response {});
+    }
+
+    let event = RoomMemberEventContent {
+        is_direct: None,
+        membership: MembershipState::Leave,
+        third_party_invite: None,
+        reason: body.reason.clone(),
+        join_authorized_via_users_server: None,
+        ..event
+    };
 
     let mutex_state = Arc::clone(
         services()
@ -387,6 +388,7 @@ pub async fn unban_user_route(
             unsigned: None,
             state_key: Some(body.user_id.to_string()),
             redacts: None,
+            timestamp: None,
         },
         sender_user,
         &body.room_id,
@ -454,7 +456,7 @@ pub async fn get_member_events_route(
         .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
             "You don't have permission to view this room.",
         ));
     }
@ -489,7 +491,7 @@ pub async fn joined_members_route(
         .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
            "You don't have permission to view this room.",
         ));
     }
@ -625,7 +627,7 @@ async fn join_room_by_id_helper(
         let event_id = format!(
             "${}",
             ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
-                .expect("ruma can calculate reference hashes")
+                .expect("Event format validated when event was hashed")
         );
         let event_id = <&EventId>::try_from(event_id.as_str())
             .expect("ruma's reference hashes are valid event ids");
@ -939,6 +941,7 @@ async fn join_room_by_id_helper(
                 unsigned: None,
                 state_key: Some(sender_user.to_string()),
                 redacts: None,
+                timestamp: None,
             },
             sender_user,
             room_id,
@ -984,6 +987,8 @@ async fn join_room_by_id_helper(
                     .as_str()
             })
             .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok());
+        let restricted_join = join_authorized_via_users_server.is_some();
+
         // TODO: Is origin needed?
         join_event_stub.insert(
             "origin".to_owned(),
@ -1030,7 +1035,7 @@ async fn join_room_by_id_helper(
             ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
                 .expect("ruma can calculate reference hashes")
         );
-        let event_id = <&EventId>::try_from(event_id.as_str())
+        let event_id = OwnedEventId::try_from(event_id)
             .expect("ruma's reference hashes are valid event ids");
 
         // Add event_id back
@ -1055,43 +1060,32 @@ async fn join_room_by_id_helper(
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
if let Some(signed_raw) = send_join_response.room_state.event {
|
let pdu = if let Some(signed_raw) = send_join_response.room_state.event {
|
||||||
let (signed_event_id, signed_value) =
|
let (signed_event_id, signed_pdu) =
|
||||||
match gen_event_id_canonical_json(&signed_raw, &room_version_id) {
|
gen_event_id_canonical_json(&signed_raw, &room_version_id)?;
|
||||||
Ok(t) => t,
|
|
||||||
Err(_) => {
|
|
||||||
// Event could not be converted to canonical json
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Could not convert event to canonical json.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if signed_event_id != event_id {
|
if signed_event_id != event_id {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadServerResponse(
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Server sent event with wrong event id",
|
"Server sent event with wrong event id",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
drop(state_lock);
|
signed_pdu
|
||||||
let pub_key_map = RwLock::new(BTreeMap::new());
|
} else if restricted_join {
|
||||||
services()
|
return Err(Error::BadServerResponse(
|
||||||
.rooms
|
"No signed event was returned, despite just performing a restricted join",
|
||||||
.event_handler
|
));
|
||||||
.handle_incoming_pdu(
|
|
||||||
&remote_server,
|
|
||||||
&signed_event_id,
|
|
||||||
room_id,
|
|
||||||
signed_value,
|
|
||||||
true,
|
|
||||||
&pub_key_map,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
} else {
|
} else {
|
||||||
return Err(error);
|
join_event
|
||||||
}
|
};
|
||||||
|
|
||||||
|
drop(state_lock);
|
||||||
|
let pub_key_map = RwLock::new(BTreeMap::new());
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.event_handler
|
||||||
|
.handle_incoming_pdu(&remote_server, &event_id, room_id, pdu, true, &pub_key_map)
|
||||||
|
.await?;
|
||||||
} else {
|
} else {
|
||||||
return Err(error);
|
return Err(error);
|
||||||
}
|
}
|
||||||
|
@ -1142,7 +1136,7 @@ async fn make_join_request(
|
||||||
async fn validate_and_add_event_id(
|
async fn validate_and_add_event_id(
|
||||||
pdu: &RawJsonValue,
|
pdu: &RawJsonValue,
|
||||||
room_version: &RoomVersionId,
|
room_version: &RoomVersionId,
|
||||||
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
|
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
|
||||||
) -> Result<(OwnedEventId, CanonicalJsonObject)> {
|
) -> Result<(OwnedEventId, CanonicalJsonObject)> {
|
||||||
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
|
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
|
||||||
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
|
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
|
||||||
|
@ -1151,7 +1145,7 @@ async fn validate_and_add_event_id(
|
||||||
let event_id = EventId::parse(format!(
|
let event_id = EventId::parse(format!(
|
||||||
"${}",
|
"${}",
|
||||||
ruma::signatures::reference_hash(&value, room_version)
|
ruma::signatures::reference_hash(&value, room_version)
|
||||||
.expect("ruma can calculate reference hashes")
|
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))?
|
||||||
))
|
))
|
||||||
.expect("ruma's reference hashes are valid event ids");
|
.expect("ruma's reference hashes are valid event ids");
|
||||||
|
|
||||||
|
@ -1189,8 +1183,35 @@ async fn validate_and_add_event_id(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version)
|
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
|
||||||
{
|
error!("Invalid PDU, no origin_server_ts field");
|
||||||
|
Error::BadRequest(
|
||||||
|
ErrorKind::MissingParam,
|
||||||
|
"Invalid PDU, no origin_server_ts field",
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
|
||||||
|
let ts = origin_server_ts.as_integer().ok_or_else(|| {
|
||||||
|
Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"origin_server_ts must be an integer",
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
|
||||||
|
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
|
||||||
|
})?)
|
||||||
|
};
|
||||||
|
|
||||||
|
let unfiltered_keys = (*pub_key_map.read().await).clone();
|
||||||
|
|
||||||
|
let keys =
|
||||||
|
services()
|
||||||
|
.globals
|
||||||
|
.filter_keys_server_map(unfiltered_keys, origin_server_ts, room_version);
|
||||||
|
|
||||||
|
if let Err(e) = ruma::signatures::verify_event(&keys, &value, room_version) {
|
||||||
warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
|
warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
|
||||||
back_off(event_id).await;
|
back_off(event_id).await;
|
||||||
return Err(Error::BadServerResponse("Event failed verification."));
|
return Err(Error::BadServerResponse("Event failed verification."));
|
||||||
|
@ -1243,6 +1264,7 @@ pub(crate) async fn invite_helper<'a>(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some(user_id.to_string()),
|
state_key: Some(user_id.to_string()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
room_id,
|
room_id,
|
||||||
|
@ -1319,60 +1341,60 @@ pub(crate) async fn invite_helper<'a>(
|
||||||
.filter(|server| &**server != services().globals.server_name());
|
.filter(|server| &**server != services().globals.server_name());
|
||||||
|
|
||||||
services().sending.send_pdu(servers, &pdu_id)?;
|
services().sending.send_pdu(servers, &pdu_id)?;
|
||||||
|
} else {
|
||||||
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, room_id)?
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::forbidden(),
|
||||||
|
"You don't have permission to view this room.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
return Ok(());
|
let mutex_state = Arc::clone(
|
||||||
}
|
services()
|
||||||
|
.globals
|
||||||
|
.roomid_mutex_state
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.entry(room_id.to_owned())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_joined(sender_user, room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"You don't have permission to view this room.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
services()
|
services()
|
||||||
.globals
|
.rooms
|
||||||
.roomid_mutex_state
|
.timeline
|
||||||
.write()
|
.build_and_append_pdu(
|
||||||
.await
|
PduBuilder {
|
||||||
.entry(room_id.to_owned())
|
event_type: TimelineEventType::RoomMember,
|
||||||
.or_default(),
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
);
|
membership: MembershipState::Invite,
|
||||||
let state_lock = mutex_state.lock().await;
|
displayname: services().users.displayname(user_id)?,
|
||||||
|
avatar_url: services().users.avatar_url(user_id)?,
|
||||||
|
is_direct: Some(is_direct),
|
||||||
|
third_party_invite: None,
|
||||||
|
blurhash: services().users.blurhash(user_id)?,
|
||||||
|
reason,
|
||||||
|
join_authorized_via_users_server: None,
|
||||||
|
})
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(user_id.to_string()),
|
||||||
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
|
},
|
||||||
|
sender_user,
|
||||||
|
room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
services()
|
// Critical point ends
|
||||||
.rooms
|
drop(state_lock);
|
||||||
.timeline
|
}
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomMember,
|
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Invite,
|
|
||||||
displayname: services().users.displayname(user_id)?,
|
|
||||||
avatar_url: services().users.avatar_url(user_id)?,
|
|
||||||
is_direct: Some(is_direct),
|
|
||||||
third_party_invite: None,
|
|
||||||
blurhash: services().users.blurhash(user_id)?,
|
|
||||||
reason,
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(user_id.to_string()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
drop(state_lock);
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -1470,12 +1492,15 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
||||||
Some(e) => e,
|
Some(e) => e,
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get())
|
let event = RoomMemberEventContent {
|
||||||
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
|
is_direct: None,
|
||||||
|
membership: MembershipState::Leave,
|
||||||
event.membership = MembershipState::Leave;
|
third_party_invite: None,
|
||||||
event.reason = reason;
|
reason,
|
||||||
event.join_authorized_via_users_server = None;
|
join_authorized_via_users_server: None,
|
||||||
|
..serde_json::from_str(member_event.content.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid member event in database."))?
|
||||||
|
};
|
||||||
|
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -1487,6 +1512,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some(user_id.to_string()),
|
state_key: Some(user_id.to_string()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
user_id,
|
user_id,
|
||||||
room_id,
|
room_id,
|
||||||
|
@ -1588,7 +1614,7 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
||||||
let event_id = EventId::parse(format!(
|
let event_id = EventId::parse(format!(
|
||||||
"${}",
|
"${}",
|
||||||
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
|
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
|
||||||
.expect("ruma can calculate reference hashes")
|
.expect("Event format validated when event was hashed")
|
||||||
))
|
))
|
||||||
.expect("ruma's reference hashes are valid event ids");
|
.expect("ruma's reference hashes are valid event ids");
|
||||||
|
|
||||||
|
|
|
@ -43,7 +43,7 @@ pub async fn send_message_event_route(
         && !services().globals.allow_encryption()
     {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
             "Encryption has been disabled",
         ));
     }
@ -84,6 +84,11 @@ pub async fn send_message_event_route(
                 unsigned: Some(unsigned),
                 state_key: None,
                 redacts: None,
+                timestamp: if body.appservice_info.is_some() {
+                    body.timestamp
+                } else {
+                    None
+                },
             },
             sender_user,
             &body.room_id,
@ -11,6 +11,7 @@ mod keys;
 mod media;
 mod membership;
 mod message;
+mod openid;
 mod presence;
 mod profile;
 mod push;
@ -47,6 +48,7 @@ pub use keys::*;
 pub use media::*;
 pub use membership::*;
 pub use message::*;
+pub use openid::*;
 pub use presence::*;
 pub use profile::*;
 pub use push::*;
23 src/api/client_server/openid.rs Normal file
@ -0,0 +1,23 @@
+use std::time::Duration;
+
+use ruma::{api::client::account, authentication::TokenType};
+
+use crate::{services, Result, Ruma};
+
+/// # `POST /_matrix/client/r0/user/{userId}/openid/request_token`
+///
+/// Request an OpenID token to verify identity with third-party services.
+///
+/// - The token generated is only valid for the OpenID API.
+pub async fn create_openid_token_route(
+    body: Ruma<account::request_openid_token::v3::Request>,
+) -> Result<account::request_openid_token::v3::Response> {
+    let (access_token, expires_in) = services().users.create_openid_token(&body.user_id)?;
+
+    Ok(account::request_openid_token::v3::Response {
+        access_token,
+        token_type: TokenType::Bearer,
+        matrix_server_name: services().globals.server_name().to_owned(),
+        expires_in: Duration::from_secs(expires_in),
+    })
+}
@ -65,6 +65,7 @@ pub async fn set_displayname_route(
                 unsigned: None,
                 state_key: Some(sender_user.to_string()),
                 redacts: None,
+                timestamp: None,
             },
             room_id,
         ))
@ -200,6 +201,7 @@ pub async fn set_avatar_url_route(
                 unsigned: None,
                 state_key: Some(sender_user.to_string()),
                 redacts: None,
+                timestamp: None,
             },
             room_id,
         ))
@ -44,6 +44,7 @@ pub async fn redact_event_route(
                 unsigned: None,
                 state_key: None,
                 redacts: Some(body.event_id.into()),
+                timestamp: None,
             },
             sender_user,
             &body.room_id,
@ -3,7 +3,7 @@ use ruma::api::client::relations::{
|
||||||
get_relating_events_with_rel_type_and_event_type,
|
get_relating_events_with_rel_type_and_event_type,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{service::rooms::timeline::PduCount, services, Result, Ruma};
|
use crate::{services, Result, Ruma};
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
|
||||||
pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
||||||
|
@ -11,27 +11,6 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
||||||
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
|
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let from = match body.from.clone() {
|
|
||||||
Some(from) => PduCount::try_from_string(&from)?,
|
|
||||||
None => match ruma::api::Direction::Backward {
|
|
||||||
// TODO: fix ruma so `body.dir` exists
|
|
||||||
ruma::api::Direction::Forward => PduCount::min(),
|
|
||||||
ruma::api::Direction::Backward => PduCount::max(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
let to = body
|
|
||||||
.to
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
|
||||||
let limit = body
|
|
||||||
.limit
|
|
||||||
.and_then(|u| u32::try_from(u).ok())
|
|
||||||
.map_or(10_usize, |u| u as usize)
|
|
||||||
.min(100);
|
|
||||||
|
|
||||||
let res = services()
|
let res = services()
|
||||||
.rooms
|
.rooms
|
||||||
.pdu_metadata
|
.pdu_metadata
|
||||||
|
@ -41,9 +20,11 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
||||||
&body.event_id,
|
&body.event_id,
|
||||||
Some(body.event_type.clone()),
|
Some(body.event_type.clone()),
|
||||||
Some(body.rel_type.clone()),
|
Some(body.rel_type.clone()),
|
||||||
from,
|
body.from.clone(),
|
||||||
to,
|
body.to.clone(),
|
||||||
limit,
|
body.limit,
|
||||||
|
body.recurse,
|
||||||
|
&body.dir,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
Ok(
|
Ok(
|
||||||
|
@ -51,6 +32,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
||||||
chunk: res.chunk,
|
chunk: res.chunk,
|
||||||
next_batch: res.next_batch,
|
next_batch: res.next_batch,
|
||||||
prev_batch: res.prev_batch,
|
prev_batch: res.prev_batch,
|
||||||
|
recursion_depth: res.recursion_depth,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
@ -61,27 +43,6 @@ pub async fn get_relating_events_with_rel_type_route(
|
||||||
) -> Result<get_relating_events_with_rel_type::v1::Response> {
|
) -> Result<get_relating_events_with_rel_type::v1::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let from = match body.from.clone() {
|
|
||||||
Some(from) => PduCount::try_from_string(&from)?,
|
|
||||||
None => match ruma::api::Direction::Backward {
|
|
||||||
// TODO: fix ruma so `body.dir` exists
|
|
||||||
ruma::api::Direction::Forward => PduCount::min(),
|
|
||||||
ruma::api::Direction::Backward => PduCount::max(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
let to = body
|
|
||||||
.to
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
|
||||||
let limit = body
|
|
||||||
.limit
|
|
||||||
.and_then(|u| u32::try_from(u).ok())
|
|
||||||
.map_or(10_usize, |u| u as usize)
|
|
||||||
.min(100);
|
|
||||||
|
|
||||||
let res = services()
|
let res = services()
|
||||||
.rooms
|
.rooms
|
||||||
.pdu_metadata
|
.pdu_metadata
|
||||||
|
@ -91,15 +52,18 @@ pub async fn get_relating_events_with_rel_type_route(
|
||||||
&body.event_id,
|
&body.event_id,
|
||||||
None,
|
None,
|
||||||
Some(body.rel_type.clone()),
|
Some(body.rel_type.clone()),
|
||||||
from,
|
body.from.clone(),
|
||||||
to,
|
body.to.clone(),
|
||||||
limit,
|
body.limit,
|
||||||
|
body.recurse,
|
||||||
|
&body.dir,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
Ok(get_relating_events_with_rel_type::v1::Response {
|
Ok(get_relating_events_with_rel_type::v1::Response {
|
||||||
chunk: res.chunk,
|
chunk: res.chunk,
|
||||||
next_batch: res.next_batch,
|
next_batch: res.next_batch,
|
||||||
prev_batch: res.prev_batch,
|
prev_batch: res.prev_batch,
|
||||||
|
recursion_depth: res.recursion_depth,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -109,27 +73,6 @@ pub async fn get_relating_events_route(
|
||||||
) -> Result<get_relating_events::v1::Response> {
|
) -> Result<get_relating_events::v1::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let from = match body.from.clone() {
|
|
||||||
Some(from) => PduCount::try_from_string(&from)?,
|
|
||||||
None => match ruma::api::Direction::Backward {
|
|
||||||
// TODO: fix ruma so `body.dir` exists
|
|
||||||
ruma::api::Direction::Forward => PduCount::min(),
|
|
||||||
ruma::api::Direction::Backward => PduCount::max(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
let to = body
|
|
||||||
.to
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
|
||||||
let limit = body
|
|
||||||
.limit
|
|
||||||
.and_then(|u| u32::try_from(u).ok())
|
|
||||||
.map_or(10_usize, |u| u as usize)
|
|
||||||
.min(100);
|
|
||||||
|
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.pdu_metadata
|
.pdu_metadata
|
||||||
|
@ -139,8 +82,10 @@ pub async fn get_relating_events_route(
|
||||||
&body.event_id,
|
&body.event_id,
|
||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
from,
|
body.from.clone(),
|
||||||
to,
|
body.to.clone(),
|
||||||
limit,
|
body.limit,
|
||||||
|
body.recurse,
|
||||||
|
&body.dir,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
|
@ -72,7 +72,7 @@ pub async fn create_room_route(
|
||||||
&& !services().users.is_admin(sender_user)?
|
&& !services().users.is_admin(sender_user)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::forbidden(),
|
||||||
"Room creation has been disabled.",
|
"Room creation has been disabled.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
@ -230,6 +230,7 @@ pub async fn create_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -258,6 +259,7 @@ pub async fn create_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some(sender_user.to_string()),
|
state_key: Some(sender_user.to_string()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -311,6 +313,7 @@ pub async fn create_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -334,6 +337,7 @@ pub async fn create_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -360,6 +364,7 @@ pub async fn create_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -381,6 +386,7 @@ pub async fn create_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -403,6 +409,7 @@ pub async fn create_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -447,6 +454,7 @@ pub async fn create_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -469,6 +477,7 @@ pub async fn create_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -485,7 +494,10 @@ pub async fn create_room_route(
|
||||||
|
|
||||||
// Homeserver specific stuff
|
// Homeserver specific stuff
|
||||||
if let Some(alias) = alias {
|
if let Some(alias) = alias {
|
||||||
services().rooms.alias.set_alias(&alias, &room_id)?;
|
services()
|
||||||
|
.rooms
|
||||||
|
.alias
|
||||||
|
.set_alias(&alias, &room_id, sender_user)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if body.visibility == room::Visibility::Public {
|
if body.visibility == room::Visibility::Public {
|
||||||
|
@ -522,7 +534,7 @@ pub async fn get_room_event_route(
|
||||||
&body.event_id,
|
&body.event_id,
|
||||||
)? {
|
)? {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::forbidden(),
|
||||||
"You don't have permission to view this event.",
|
"You don't have permission to view this event.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
@ -551,7 +563,7 @@ pub async fn get_room_aliases_route(
|
||||||
.is_joined(sender_user, &body.room_id)?
|
.is_joined(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::forbidden(),
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this room.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
@ -626,6 +638,7 @@ pub async fn upgrade_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
@ -727,6 +740,7 @@ pub async fn upgrade_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
|
@ -755,6 +769,7 @@ pub async fn upgrade_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some(sender_user.to_string()),
|
state_key: Some(sender_user.to_string()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
|
@ -797,6 +812,7 @@ pub async fn upgrade_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
|
@ -815,7 +831,7 @@ pub async fn upgrade_room_route(
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.alias
|
.alias
|
||||||
.set_alias(&alias, &replacement_room)?;
|
.set_alias(&alias, &replacement_room, sender_user)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the old room power levels
|
// Get the old room power levels
|
||||||
|
@ -847,6 +863,7 @@ pub async fn upgrade_room_route(
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
timestamp: None,
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
|
|
@ -43,7 +43,7 @@ pub async fn search_events_route(
|
||||||
.is_joined(sender_user, &room_id)?
|
.is_joined(sender_user, &room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::forbidden(),
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this room.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
@ -89,11 +89,12 @@ pub async fn search_events_route(
|
||||||
.get_pdu_from_id(result)
|
.get_pdu_from_id(result)
|
||||||
.ok()?
|
.ok()?
|
||||||
.filter(|pdu| {
|
.filter(|pdu| {
|
||||||
services()
|
!pdu.is_redacted()
|
||||||
.rooms
|
&& services()
|
||||||
.state_accessor
|
.rooms
|
||||||
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
|
.state_accessor
|
||||||
.unwrap_or(false)
|
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
|
||||||
|
.unwrap_or(false)
|
||||||
})
|
})
|
||||||
.map(|pdu| pdu.to_room_event())
|
.map(|pdu| pdu.to_room_event())
|
||||||
})
|
})
|
||||||
|
|
|
@@ -63,7 +63,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
             UserId::parse(user)
         } else {
             warn!("Bad login type: {:?}", &body.login_info);
-            return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
+            return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
         }
         .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;

@@ -78,7 +78,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
                 .users
                 .password_hash(&user_id)?
                 .ok_or(Error::BadRequest(
-                    ErrorKind::Forbidden,
+                    ErrorKind::forbidden(),
                     "Wrong username or password.",
                 ))?;

@@ -93,7 +93,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re

             if !hash_matches {
                 return Err(Error::BadRequest(
-                    ErrorKind::Forbidden,
+                    ErrorKind::forbidden(),
                     "Wrong username or password.",
                 ));
             }
@@ -143,7 +143,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
             UserId::parse(user)
         } else {
             warn!("Bad login type: {:?}", &body.login_info);
-            return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
+            return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
         }
         .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;

@@ -10,7 +10,7 @@ use ruma::{
         room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
     },
     serde::Raw,
-    EventId, RoomId, UserId,
+    EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId,
 };
 use tracing::log::warn;

@@ -32,6 +32,11 @@ pub async fn send_state_event_for_key_route(
         &body.event_type,
         &body.body.body, // Yes, I hate it too
         body.state_key.to_owned(),
+        if body.appservice_info.is_some() {
+            body.timestamp
+        } else {
+            None
+        },
     )
     .await?;

@@ -54,7 +59,7 @@ pub async fn send_state_event_for_empty_key_route(
     // Forbid m.room.encryption if encryption is disabled
     if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
             "Encryption has been disabled",
         ));
     }
@@ -65,6 +70,11 @@ pub async fn send_state_event_for_empty_key_route(
         &body.event_type.to_string().into(),
         &body.body.body,
         body.state_key.to_owned(),
+        if body.appservice_info.is_some() {
+            body.timestamp
+        } else {
+            None
+        },
     )
     .await?;

@@ -88,7 +98,7 @@ pub async fn get_state_events_route(
         .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
             "You don't have permission to view the room state.",
         ));
     }
@@ -121,7 +131,7 @@ pub async fn get_state_events_for_key_route(
         .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
             "You don't have permission to view the room state.",
         ));
     }
@@ -160,7 +170,7 @@ pub async fn get_state_events_for_empty_key_route(
         .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
             "You don't have permission to view the room state.",
         ));
     }
@@ -190,6 +200,7 @@ async fn send_state_event_for_key_helper(
     event_type: &StateEventType,
     json: &Raw<AnyStateEventContent>,
     state_key: String,
+    timestamp: Option<MilliSecondsSinceUnixEpoch>,
 ) -> Result<Arc<EventId>> {
     let sender_user = sender;

@@ -214,7 +225,7 @@ async fn send_state_event_for_key_helper(
             .is_none()
         {
             return Err(Error::BadRequest(
-                ErrorKind::Forbidden,
+                ErrorKind::forbidden(),
                 "You are only allowed to send canonical_alias \
                 events when it's aliases already exists",
             ));
@@ -243,6 +254,7 @@ async fn send_state_event_for_key_helper(
             unsigned: None,
             state_key: Some(state_key),
             redacts: None,
+            timestamp,
         },
         sender_user,
         room_id,
@@ -12,7 +12,7 @@ use ruma::{
                 Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom,
                 LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice,
             },
-            v4::SlidingOp,
+            v4::{SlidingOp, SlidingSyncRoomHero},
             DeviceLists, UnreadNotificationsCount,
         },
         uiaa::UiaaResponse,
@@ -716,7 +716,7 @@ async fn load_joined_room(
                             .state_cache
                             .is_invited(&user_id, room_id)?)
                     {
-                        Ok::<_, Error>(Some(state_key.clone()))
+                        Ok::<_, Error>(Some(user_id))
                     } else {
                         Ok(None)
                     }
@@ -1572,7 +1572,7 @@ pub async fn sync_events_v4_route(
             sender_user.clone(),
             sender_device.clone(),
             conn_id.clone(),
-            body.room_subscriptions,
+            body.room_subscriptions.clone(),
         );
     }

@@ -1638,33 +1638,37 @@ pub async fn sync_events_v4_route(
                         .get_member(room_id, &member)
                         .ok()
                         .flatten()
-                        .map(|memberevent| {
-                            (
-                                memberevent
-                                    .displayname
-                                    .unwrap_or_else(|| member.to_string()),
-                                memberevent.avatar_url,
-                            )
+                        .map(|memberevent| SlidingSyncRoomHero {
+                            user_id: member,
+                            name: memberevent.displayname,
+                            avatar: memberevent.avatar_url,
                         })
                 })
                 .take(5)
                 .collect::<Vec<_>>();
            let name = match &heroes[..] {
                [] => None,
-                [only] => Some(only.0.clone()),
+                [only] => Some(
+                    only.name
+                        .clone()
+                        .unwrap_or_else(|| only.user_id.to_string()),
+                ),
                [firsts @ .., last] => Some(
                    firsts
                        .iter()
-                        .map(|h| h.0.clone())
+                        .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string()))
                        .collect::<Vec<_>>()
                        .join(", ")
                        + " and "
-                        + &last.0,
+                        + &last
+                            .name
+                            .clone()
+                            .unwrap_or_else(|| last.user_id.to_string()),
                ),
            };

            let avatar = if let [only] = &heroes[..] {
-                only.1.clone()
+                only.avatar.clone()
            } else {
                None
            };
@@ -1725,6 +1729,16 @@ pub async fn sync_events_v4_route(
                    ),
                    num_live: None, // Count events in timeline greater than global sync counter
                    timestamp: None,
+                    heroes: if body
+                        .room_subscriptions
+                        .get(room_id)
+                        .map(|sub| sub.include_heroes.unwrap_or_default())
+                        .unwrap_or_default()
+                    {
+                        Some(heroes)
+                    } else {
+                        None
+                    },
                },
            );
        }
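A note on the hero-name computation in the hunk above: a hero's display name falls back to their user ID whenever no display name is set, and several heroes are joined with ", " plus a final " and ". A minimal, self-contained sketch of that fallback-and-join logic (the `Hero` struct below is illustrative, not the ruma type used in the patch):

struct Hero {
    user_id: String,
    name: Option<String>,
}

/// Joins hero display names the same way the sync handler does:
/// empty -> None, one hero -> its name, several -> "a, b and c".
fn room_name_from_heroes(heroes: &[Hero]) -> Option<String> {
    let display = |h: &Hero| h.name.clone().unwrap_or_else(|| h.user_id.clone());
    match heroes {
        [] => None,
        [only] => Some(display(only)),
        [firsts @ .., last] => Some(
            firsts.iter().map(display).collect::<Vec<_>>().join(", ") + " and " + &display(last),
        ),
    }
}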
@@ -17,7 +17,7 @@ pub async fn create_typing_event_route(
         .is_joined(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
-            ErrorKind::Forbidden,
+            ErrorKind::forbidden(),
             "You are not in this room.",
         ));
     }
@@ -27,7 +27,10 @@ pub async fn get_supported_versions_route(
             "v1.4".to_owned(),
             "v1.5".to_owned(),
         ],
-        unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
+        unstable_features: BTreeMap::from_iter([
+            ("org.matrix.e2e_cross_signing".to_owned(), true),
+            ("org.matrix.msc3916.stable".to_owned(), true),
+        ]),
     };

     Ok(resp)
@@ -2,20 +2,22 @@ use std::{collections::BTreeMap, iter::FromIterator, str};

 use axum::{
     async_trait,
-    body::{Full, HttpBody},
-    extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader},
-    headers::{
-        authorization::{Bearer, Credentials},
-        Authorization,
-    },
+    body::Body,
+    extract::{FromRequest, Path},
     response::{IntoResponse, Response},
-    BoxError, RequestExt, RequestPartsExt,
+    RequestExt, RequestPartsExt,
 };
-use bytes::{Buf, BufMut, Bytes, BytesMut};
+use axum_extra::{
+    headers::{authorization::Bearer, Authorization},
+    typed_header::TypedHeaderRejectionReason,
+    TypedHeader,
+};
+use bytes::{BufMut, BytesMut};
 use http::{Request, StatusCode};
 use ruma::{
     api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
-    CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
+    server_util::authorization::XMatrix,
+    CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId,
 };
 use serde::Deserialize;
 use tracing::{debug, error, warn};
@@ -31,37 +33,33 @@ enum Token {
 }

 #[async_trait]
-impl<T, S, B> FromRequest<S, B> for Ruma<T>
+impl<T, S> FromRequest<S> for Ruma<T>
 where
     T: IncomingRequest,
-    B: HttpBody + Send + 'static,
-    B::Data: Send,
-    B::Error: Into<BoxError>,
 {
     type Rejection = Error;

-    async fn from_request(req: Request<B>, _state: &S) -> Result<Self, Self::Rejection> {
+    async fn from_request(req: Request<Body>, _state: &S) -> Result<Self, Self::Rejection> {
         #[derive(Deserialize)]
         struct QueryParams {
             access_token: Option<String>,
             user_id: Option<String>,
         }

-        let (mut parts, mut body) = match req.with_limited_body() {
-            Ok(limited_req) => {
-                let (parts, body) = limited_req.into_parts();
-                let body = to_bytes(body)
-                    .await
-                    .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
-                (parts, body)
-            }
-            Err(original_req) => {
-                let (parts, body) = original_req.into_parts();
-                let body = to_bytes(body)
-                    .await
-                    .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
-                (parts, body)
-            }
+        let (mut parts, mut body) = {
+            let limited_req = req.with_limited_body();
+            let (parts, body) = limited_req.into_parts();
+            let body = axum::body::to_bytes(
+                body,
+                services()
+                    .globals
+                    .max_request_size()
+                    .try_into()
+                    .unwrap_or(usize::MAX),
+            )
+            .await
+            .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
+            (parts, body)
         };

         let metadata = T::METADATA;
@@ -102,10 +100,15 @@ where
         let (sender_user, sender_device, sender_servername, appservice_info) =
             match (metadata.authentication, token) {
                 (_, Token::Invalid) => {
-                    return Err(Error::BadRequest(
-                        ErrorKind::UnknownToken { soft_logout: false },
-                        "Unknown access token.",
-                    ))
+                    // OpenID endpoint uses a query param with the same name, drop this once query params for user auth are removed from the spec
+                    if query_params.access_token.is_some() {
+                        (None, None, None, None)
+                    } else {
+                        return Err(Error::BadRequest(
+                            ErrorKind::UnknownToken { soft_logout: false },
+                            "Unknown access token.",
+                        ));
+                    }
                 }
                 (AuthScheme::AccessToken, Token::Appservice(info)) => {
                     let user_id = query_params
@@ -132,7 +135,7 @@ where

                     if !services().users.exists(&user_id)? {
                         return Err(Error::BadRequest(
-                            ErrorKind::Forbidden,
+                            ErrorKind::forbidden(),
                             "User does not exist.",
                         ));
                     }
@@ -172,7 +175,7 @@ where
                             _ => "Unknown header-related error",
                         };

-                        Error::BadRequest(ErrorKind::Forbidden, msg)
+                        Error::BadRequest(ErrorKind::forbidden(), msg)
                     })?;

                     if let Some(dest) = x_matrix.destination {
@@ -186,12 +189,17 @@ where

                     let origin_signatures = BTreeMap::from_iter([(
                         x_matrix.key.clone(),
-                        CanonicalJsonValue::String(x_matrix.sig),
+                        CanonicalJsonValue::String(x_matrix.sig.to_string()),
                     )]);

                     let signatures = BTreeMap::from_iter([(
                         x_matrix.origin.as_str().to_owned(),
-                        CanonicalJsonValue::Object(origin_signatures),
+                        CanonicalJsonValue::Object(
+                            origin_signatures
+                                .into_iter()
+                                .map(|(k, v)| (k.to_string(), v))
+                                .collect(),
+                        ),
                     )]);

                     let mut request_map = BTreeMap::from_iter([
@@ -226,7 +234,7 @@ where
                     let keys_result = services()
                         .rooms
                         .event_handler
-                        .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
+                        .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false)
                         .await;

                     let keys = match keys_result {
@@ -234,14 +242,25 @@ where
                         Err(e) => {
                             warn!("Failed to fetch signing keys: {}", e);
                             return Err(Error::BadRequest(
-                                ErrorKind::Forbidden,
+                                ErrorKind::forbidden(),
                                 "Failed to fetch signing keys.",
                             ));
                         }
                     };

-                    let pub_key_map =
-                        BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
+                    // Only verify_keys that are currently valid should be used for validating requests
+                    // as per MSC4029
+                    let pub_key_map = BTreeMap::from_iter([(
+                        x_matrix.origin.as_str().to_owned(),
+                        if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
+                            keys.verify_keys
+                                .into_iter()
+                                .map(|(id, key)| (id, key.key))
+                                .collect()
+                        } else {
+                            BTreeMap::new()
+                        },
+                    )]);

                     match ruma::signatures::verify_json(&pub_key_map, &request_map) {
                         Ok(()) => (None, None, Some(x_matrix.origin), None),
@@ -260,7 +279,7 @@ where
                             }

                             return Err(Error::BadRequest(
-                                ErrorKind::Forbidden,
+                                ErrorKind::forbidden(),
                                 "Failed to verify X-Matrix signatures.",
                             ));
                         }
@@ -340,124 +359,11 @@ where
     }
 }

-struct XMatrix {
-    destination: Option<OwnedServerName>,
-    origin: OwnedServerName,
-    key: String, // KeyName?
-    sig: String,
-}
-
-impl Credentials for XMatrix {
-    const SCHEME: &'static str = "X-Matrix";
-
-    fn decode(value: &http::HeaderValue) -> Option<Self> {
-        debug_assert!(
-            value.as_bytes().starts_with(b"X-Matrix "),
-            "HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}",
-        );
-
-        let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
-            .ok()?
-            .trim_start();
-
-        let mut origin = None;
-        let mut key = None;
-        let mut sig = None;
-        let mut destination = None;
-
-        for entry in parameters.split_terminator(',') {
-            let (name, value) = entry.split_once('=')?;
-
-            // It's not at all clear why some fields are quoted and others not in the spec,
-            // let's simply accept either form for every field.
-            let value = value
-                .strip_prefix('"')
-                .and_then(|rest| rest.strip_suffix('"'))
-                .unwrap_or(value);
-
-            // FIXME: Catch multiple fields of the same name
-            match name {
-                "origin" => origin = Some(value.try_into().ok()?),
-                "key" => key = Some(value.to_owned()),
-                "sig" => sig = Some(value.to_owned()),
-                "destination" => destination = Some(value.try_into().ok()?),
-                _ => debug!(
-                    "Unexpected field `{}` in X-Matrix Authorization header",
-                    name
-                ),
-            }
-        }
-
-        Some(Self {
-            destination,
-            origin: origin?,
-            key: key?,
-            sig: sig?,
-        })
-    }
-
-    fn encode(&self) -> http::HeaderValue {
-        todo!()
-    }
-}
-
 impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
     fn into_response(self) -> Response {
         match self.0.try_into_http_response::<BytesMut>() {
-            Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
+            Ok(res) => res.map(BytesMut::freeze).map(Body::from).into_response(),
             Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
         }
     }
 }
-
-// copied from hyper under the following license:
-// Copyright (c) 2014-2021 Sean McArthur
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-pub(crate) async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
-where
-    T: HttpBody,
-{
-    futures_util::pin_mut!(body);
-
-    // If there's only 1 chunk, we can just return Buf::to_bytes()
-    let mut first = if let Some(buf) = body.data().await {
-        buf?
-    } else {
-        return Ok(Bytes::new());
-    };
-
-    let second = if let Some(buf) = body.data().await {
-        buf?
-    } else {
-        return Ok(first.copy_to_bytes(first.remaining()));
-    };
-
-    // With more than 1 buf, we gotta flatten into a Vec first.
-    let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
-    let mut vec = Vec::with_capacity(cap);
-    vec.put(first);
-    vec.put(second);
-
-    while let Some(buf) = body.data().await {
-        vec.put(buf?);
-    }
-
-    Ok(vec.into())
-}
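As context for the request-verification change above: only verify_keys whose validity window has not yet expired are handed to the JSON signature check. A simplified sketch of that selection using plain std types (the struct and field names below are illustrative stand-ins for the ruma structures, not the patch's own API):

struct SigningKeySet {
    valid_until_ts: u64,                                      // milliseconds since the Unix epoch
    verify_keys: std::collections::BTreeMap<String, String>,  // key id -> base64-encoded key
}

/// Returns the keys that may be used for request verification: the whole set while it
/// is still valid, and nothing once valid_until_ts has passed (the MSC4029 behaviour).
fn usable_verify_keys(keys: SigningKeySet, now_ms: u64) -> std::collections::BTreeMap<String, String> {
    if keys.valid_until_ts > now_ms {
        keys.verify_keys
    } else {
        std::collections::BTreeMap::new()
    }
}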
File diff suppressed because it is too large
Load diff
@@ -47,6 +47,8 @@ pub struct Config {
     #[serde(default = "false_fn")]
     pub allow_registration: bool,
     pub registration_token: Option<String>,
+    #[serde(default = "default_openid_token_ttl")]
+    pub openid_token_ttl: u64,
     #[serde(default = "true_fn")]
     pub allow_encryption: bool,
     #[serde(default = "false_fn")]
@@ -57,7 +59,7 @@ pub struct Config {
     pub allow_unstable_room_versions: bool,
     #[serde(default = "default_default_room_version")]
     pub default_room_version: RoomVersionId,
-    #[serde(default)]
+    #[serde(default, flatten)]
     pub well_known: WellKnownConfig,
     #[serde(default = "false_fn")]
     pub allow_jaeger: bool,
@@ -95,7 +97,9 @@ pub struct TlsConfig {

 #[derive(Clone, Debug, Deserialize, Default)]
 pub struct WellKnownConfig {
+    #[serde(rename = "well_known_client")]
     pub client: Option<Url>,
+    #[serde(rename = "well_known_server")]
     pub server: Option<OwnedServerName>,
 }

@@ -302,6 +306,10 @@ fn default_turn_ttl() -> u64 {
     60 * 60 * 24
 }

+fn default_openid_token_ttl() -> u64 {
+    60 * 60
+}
+
 // I know, it's a great name
 pub fn default_default_room_version() -> RoomVersionId {
     RoomVersionId::V10
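For context on the `#[serde(default, flatten)]` plus `#[serde(rename = "...")]` combination above: it keeps the nested WellKnownConfig struct in code while reading flat, prefixed keys from the configuration. A small self-contained sketch of that serde pattern (the struct names, key names, and use of serde_json here are illustrative, not taken from the patch):

use serde::Deserialize;

#[derive(Deserialize, Default)]
struct WellKnown {
    #[serde(rename = "well_known_client")]
    client: Option<String>,
}

#[derive(Deserialize)]
struct Conf {
    server_name: String,
    // The flattened struct consumes the flat "well_known_client" key from the same level
    // as server_name, while the code still sees a nested `conf.well_known.client`.
    #[serde(default, flatten)]
    well_known: WellKnown,
}

fn main() {
    let conf: Conf = serde_json::from_str(
        r#"{"server_name": "example.com", "well_known_client": "https://matrix.example.com"}"#,
    )
    .unwrap();
    assert_eq!(conf.well_known.client.as_deref(), Some("https://matrix.example.com"));
}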
@@ -1,15 +1,19 @@
-use std::collections::{BTreeMap, HashMap};
+use std::collections::HashMap;

 use async_trait::async_trait;
 use futures_util::{stream::FuturesUnordered, StreamExt};
 use lru_cache::LruCache;
 use ruma::{
-    api::federation::discovery::{ServerSigningKeys, VerifyKey},
+    api::federation::discovery::{OldVerifyKey, ServerSigningKeys},
     signatures::Ed25519KeyPair,
-    DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
+    DeviceId, ServerName, UserId,
 };

-use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
+use crate::{
+    database::KeyValueDatabase,
+    service::{self, globals::SigningKeys},
+    services, utils, Error, Result,
+};

 pub const COUNTER: &[u8] = b"c";
 pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";
@@ -237,64 +241,97 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
         self.global.remove(b"keypair")
     }

-    fn add_signing_key(
-        &self,
-        origin: &ServerName,
-        new_keys: ServerSigningKeys,
-    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
-        // Not atomic, but this is not critical
-        let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
-
-        let mut keys = signingkeys
-            .and_then(|keys| serde_json::from_slice(&keys).ok())
-            .unwrap_or_else(|| {
-                // Just insert "now", it doesn't matter
-                ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
-            });
-
-        let ServerSigningKeys {
-            verify_keys,
-            old_verify_keys,
-            ..
-        } = new_keys;
-
-        keys.verify_keys.extend(verify_keys);
-        keys.old_verify_keys.extend(old_verify_keys);
-
-        self.server_signingkeys.insert(
-            origin.as_bytes(),
-            &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
-        )?;
-
-        let mut tree = keys.verify_keys;
-        tree.extend(
-            keys.old_verify_keys
-                .into_iter()
-                .map(|old| (old.0, VerifyKey::new(old.1.key))),
-        );
-
-        Ok(tree)
-    }
-
-    /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
-    fn signing_keys_for(
-        &self,
-        origin: &ServerName,
-    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
-        let signingkeys = self
-            .server_signingkeys
-            .get(origin.as_bytes())?
-            .and_then(|bytes| serde_json::from_slice(&bytes).ok())
-            .map(|keys: ServerSigningKeys| {
-                let mut tree = keys.verify_keys;
-                tree.extend(
-                    keys.old_verify_keys
-                        .into_iter()
-                        .map(|old| (old.0, VerifyKey::new(old.1.key))),
-                );
-                tree
-            })
-            .unwrap_or_else(BTreeMap::new);
-
-        Ok(signingkeys)
-    }
+    fn add_signing_key_from_trusted_server(
+        &self,
+        origin: &ServerName,
+        new_keys: ServerSigningKeys,
+    ) -> Result<SigningKeys> {
+        let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
+
+        Ok(
+            if let Some(mut prev_keys) =
+                prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
+            {
+                let ServerSigningKeys {
+                    verify_keys,
+                    old_verify_keys,
+                    ..
+                } = new_keys;
+
+                prev_keys.verify_keys.extend(verify_keys);
+                prev_keys.old_verify_keys.extend(old_verify_keys);
+                prev_keys.valid_until_ts = new_keys.valid_until_ts;
+
+                self.server_signingkeys.insert(
+                    origin.as_bytes(),
+                    &serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
+                )?;
+
+                prev_keys.into()
+            } else {
+                self.server_signingkeys.insert(
+                    origin.as_bytes(),
+                    &serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
+                )?;
+
+                new_keys.into()
+            },
+        )
+    }
+
+    fn add_signing_key_from_origin(
+        &self,
+        origin: &ServerName,
+        new_keys: ServerSigningKeys,
+    ) -> Result<SigningKeys> {
+        let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
+
+        Ok(
+            if let Some(mut prev_keys) =
+                prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
+            {
+                let ServerSigningKeys {
+                    verify_keys,
+                    old_verify_keys,
+                    ..
+                } = new_keys;
+
+                // Moving `verify_keys` no longer present to `old_verify_keys`
+                for (key_id, key) in prev_keys.verify_keys {
+                    if !verify_keys.contains_key(&key_id) {
+                        prev_keys
+                            .old_verify_keys
+                            .insert(key_id, OldVerifyKey::new(prev_keys.valid_until_ts, key.key));
+                    }
+                }
+
+                prev_keys.verify_keys = verify_keys;
+                prev_keys.old_verify_keys.extend(old_verify_keys);
+                prev_keys.valid_until_ts = new_keys.valid_until_ts;
+
+                self.server_signingkeys.insert(
+                    origin.as_bytes(),
+                    &serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
+                )?;
+
+                prev_keys.into()
+            } else {
+                self.server_signingkeys.insert(
+                    origin.as_bytes(),
+                    &serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
+                )?;
+
+                new_keys.into()
+            },
+        )
+    }

     /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
-    fn signing_keys_for(
-        &self,
-        origin: &ServerName,
-    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
+    fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> {
         let signingkeys = self
             .server_signingkeys
             .get(origin.as_bytes())?
-            .and_then(|bytes| serde_json::from_slice(&bytes).ok())
+            .and_then(|bytes| serde_json::from_slice::<SigningKeys>(&bytes).ok());

         Ok(signingkeys)
     }
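The `add_signing_key_from_origin` change above rotates out any verify_keys the origin no longer advertises, stamping them with the previous validity timestamp. A simplified sketch of that rotation using plain maps (the types and function name are illustrative, not the ruma ones):

use std::collections::BTreeMap;

/// Any previously-known key id that is absent from `new_keys` is moved into the
/// old-keys map, recorded together with the timestamp the previous set was valid until.
fn rotate_keys(
    prev_keys: BTreeMap<String, String>,
    prev_valid_until_ts: u64,
    new_keys: &BTreeMap<String, String>,
    old_keys: &mut BTreeMap<String, (u64, String)>,
) {
    for (key_id, key) in prev_keys {
        if !new_keys.contains_key(&key_id) {
            old_keys.insert(key_id, (prev_valid_until_ts, key));
        }
    }
}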
@@ -1,4 +1,4 @@
-use ruma::api::client::error::ErrorKind;
+use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition};

 use crate::{database::KeyValueDatabase, service, utils, Error, Result};

@@ -8,7 +8,7 @@ impl service::media::Data for KeyValueDatabase {
         mxc: String,
         width: u32,
         height: u32,
-        content_disposition: Option<&str>,
+        content_disposition: &ContentDisposition,
         content_type: Option<&str>,
     ) -> Result<Vec<u8>> {
         let mut key = mxc.as_bytes().to_vec();
@@ -16,12 +16,7 @@ impl service::media::Data for KeyValueDatabase {
         key.extend_from_slice(&width.to_be_bytes());
         key.extend_from_slice(&height.to_be_bytes());
         key.push(0xff);
-        key.extend_from_slice(
-            content_disposition
-                .as_ref()
-                .map(|f| f.as_bytes())
-                .unwrap_or_default(),
-        );
+        key.extend_from_slice(content_disposition.to_string().as_bytes());
         key.push(0xff);
         key.extend_from_slice(
             content_type
@@ -40,7 +35,7 @@ impl service::media::Data for KeyValueDatabase {
         mxc: String,
         width: u32,
         height: u32,
-    ) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
+    ) -> Result<(ContentDisposition, Option<String>, Vec<u8>)> {
         let mut prefix = mxc.as_bytes().to_vec();
         prefix.push(0xff);
         prefix.extend_from_slice(&width.to_be_bytes());
@@ -68,15 +63,9 @@ impl service::media::Data for KeyValueDatabase {
             .next()
             .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;

-        let content_disposition = if content_disposition_bytes.is_empty() {
-            None
-        } else {
-            Some(
-                utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
-                    Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
-                })?,
-            )
-        };
+        let content_disposition = content_disposition_bytes.try_into().unwrap_or_else(|_| {
+            ContentDisposition::new(ruma::http_headers::ContentDispositionType::Inline)
+        });
         Ok((content_disposition, content_type, key))
     }
 }
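The thumbnail key written above is a single byte string: the mxc URI, the width and height as big-endian u32, then the serialized Content-Disposition and the content type, with 0xff separators between sections. A standalone sketch of that layout with plain byte handling (the helper name and string-typed parameters are illustrative):

/// Builds a mediaid_file-style key: mxc | 0xff | width | height | 0xff | disposition | 0xff | content type.
fn thumbnail_key(mxc: &str, width: u32, height: u32, disposition: &str, content_type: &str) -> Vec<u8> {
    let mut key = mxc.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&width.to_be_bytes());
    key.extend_from_slice(&height.to_be_bytes());
    key.push(0xff);
    key.extend_from_slice(disposition.as_bytes());
    key.push(0xff);
    key.extend_from_slice(content_type.as_bytes());
    key
}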
@@ -1,9 +1,15 @@
-use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
+use ruma::{
+    api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId,
+    UserId,
+};

 use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

 impl service::rooms::alias::Data for KeyValueDatabase {
-    fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
+    fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> {
+        // Comes first as we don't want a stuck alias
+        self.alias_userid
+            .insert(alias.alias().as_bytes(), user_id.as_bytes())?;
         self.alias_roomid
             .insert(alias.alias().as_bytes(), room_id.as_bytes())?;
         let mut aliasid = room_id.as_bytes().to_vec();
@@ -22,13 +28,13 @@ impl service::rooms::alias::Data for KeyValueDatabase {
                 self.aliasid_alias.remove(&key)?;
             }
             self.alias_roomid.remove(alias.alias().as_bytes())?;
+            self.alias_userid.remove(alias.alias().as_bytes())
         } else {
-            return Err(Error::BadRequest(
+            Err(Error::BadRequest(
                 ErrorKind::NotFound,
                 "Alias does not exist.",
-            ));
+            ))
         }
-        Ok(())
     }

     fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
@@ -57,4 +63,16 @@ impl service::rooms::alias::Data for KeyValueDatabase {
                 .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
         }))
     }
+
+    fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>> {
+        self.alias_userid
+            .get(alias.alias().as_bytes())?
+            .map(|bytes| {
+                UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
+                    Error::bad_database("User ID in alias_userid is invalid unicode.")
+                })?)
+                .map_err(|_| Error::bad_database("User ID in alias_roomid is invalid."))
+            })
+            .transpose()
+    }
 }
@@ -2,24 +2,46 @@ use ruma::RoomId;

 use crate::{database::KeyValueDatabase, service, services, utils, Result};

+/// Splits a string into tokens used as keys in the search inverted index
+///
+/// This may be used to tokenize both message bodies (for indexing) or search
+/// queries (for querying).
+fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
+    body.split_terminator(|c: char| !c.is_alphanumeric())
+        .filter(|s| !s.is_empty())
+        .filter(|word| word.len() <= 50)
+        .map(str::to_lowercase)
+}
+
 impl service::rooms::search::Data for KeyValueDatabase {
     fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
-        let mut batch = message_body
-            .split_terminator(|c: char| !c.is_alphanumeric())
-            .filter(|s| !s.is_empty())
-            .filter(|word| word.len() <= 50)
-            .map(str::to_lowercase)
-            .map(|word| {
-                let mut key = shortroomid.to_be_bytes().to_vec();
-                key.extend_from_slice(word.as_bytes());
-                key.push(0xff);
-                key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
-                (key, Vec::new())
-            });
+        let mut batch = tokenize(message_body).map(|word| {
+            let mut key = shortroomid.to_be_bytes().to_vec();
+            key.extend_from_slice(word.as_bytes());
+            key.push(0xff);
+            key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
+            (key, Vec::new())
+        });

         self.tokenids.insert_batch(&mut batch)
     }

+    fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
+        let batch = tokenize(message_body).map(|word| {
+            let mut key = shortroomid.to_be_bytes().to_vec();
+            key.extend_from_slice(word.as_bytes());
+            key.push(0xFF);
+            key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
+            key
+        });
+
+        for token in batch {
+            self.tokenids.remove(&token)?;
+        }
+
+        Ok(())
+    }
+
     fn search_pdus<'a>(
         &'a self,
         room_id: &RoomId,
@@ -33,11 +55,7 @@ impl service::rooms::search::Data for KeyValueDatabase {
             .to_be_bytes()
             .to_vec();

-        let words: Vec<_> = search_string
-            .split_terminator(|c: char| !c.is_alphanumeric())
-            .filter(|s| !s.is_empty())
-            .map(str::to_lowercase)
-            .collect();
+        let words: Vec<_> = tokenize(search_string).collect();

         let iterators = words.clone().into_iter().map(move |word| {
             let mut prefix2 = prefix.clone();
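To make the tokenizer's behaviour above concrete, here is a standalone copy of the same logic applied to a sample message body (the sample text is illustrative); because indexing and querying both go through the same function, they agree on the tokens:

fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
}

fn main() {
    // Punctuation and whitespace split words, empty fragments are dropped,
    // overlong words are skipped, and everything is lowercased.
    let tokens: Vec<String> = tokenize("Hello, Matrix world!").collect();
    assert_eq!(tokens, vec!["hello", "matrix", "world"]);
}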
@@ -80,7 +80,7 @@ impl service::uiaa::Data for KeyValueDatabase {
                 .userdevicesessionid_uiaainfo
                 .get(&userdevicesessionid)?
                 .ok_or(Error::BadRequest(
-                    ErrorKind::Forbidden,
+                    ErrorKind::forbidden(),
                     "UIAA session does not exist.",
                 ))?,
         )
@@ -11,6 +11,7 @@ use ruma::{
 use tracing::warn;

 use crate::{
+    api::client_server::TOKEN_LENGTH,
     database::KeyValueDatabase,
     service::{self, users::clean_signatures},
     services, utils, Error, Result,
@@ -943,6 +944,52 @@ impl service::users::Data for KeyValueDatabase {
             Ok(None)
         }
     }
+
+    // Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
+    fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> {
+        let token = utils::random_string(TOKEN_LENGTH);
+
+        let expires_in = services().globals.config.openid_token_ttl;
+        let expires_at = utils::millis_since_unix_epoch()
+            .checked_add(expires_in * 1000)
+            .expect("time is valid");
+
+        let mut value = expires_at.to_be_bytes().to_vec();
+        value.extend_from_slice(user_id.as_bytes());
+
+        self.openidtoken_expiresatuserid
+            .insert(token.as_bytes(), value.as_slice())?;
+
+        Ok((token, expires_in))
+    }
+
+    /// Find out which user an OpenID access token belongs to.
+    fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>> {
+        let Some(value) = self.openidtoken_expiresatuserid.get(token.as_bytes())? else {
+            return Ok(None);
+        };
+        let (expires_at_bytes, user_bytes) = value.split_at(0u64.to_be_bytes().len());
+
+        let expires_at = u64::from_be_bytes(
+            expires_at_bytes
+                .try_into()
+                .map_err(|_| Error::bad_database("expires_at in openid_userid is invalid u64."))?,
+        );
+
+        if expires_at < utils::millis_since_unix_epoch() {
+            self.openidtoken_expiresatuserid.remove(token.as_bytes())?;
+
+            return Ok(None);
+        }
+
+        Some(
+            UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| {
+                Error::bad_database("User ID in openid_userid is invalid unicode.")
+            })?)
+            .map_err(|_| Error::bad_database("User ID in openid_userid is invalid.")),
+        )
+        .transpose()
+    }
 }

 impl KeyValueDatabase {}
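The OpenID token record stored above packs an 8-byte big-endian expiry timestamp followed by the raw user ID bytes into one value. A standalone sketch of encoding and decoding that layout (the function names and sample user ID are illustrative):

/// Encodes `expires_at` (milliseconds since the Unix epoch) followed by the user ID bytes.
fn encode_openid_value(expires_at: u64, user_id: &str) -> Vec<u8> {
    let mut value = expires_at.to_be_bytes().to_vec();
    value.extend_from_slice(user_id.as_bytes());
    value
}

/// Splits the value back into the expiry timestamp and the user ID.
fn decode_openid_value(value: &[u8]) -> Option<(u64, &str)> {
    let (expires_at_bytes, user_bytes) = value.split_at(std::mem::size_of::<u64>());
    let expires_at = u64::from_be_bytes(expires_at_bytes.try_into().ok()?);
    Some((expires_at, std::str::from_utf8(user_bytes).ok()?))
}

fn main() {
    let value = encode_openid_value(1_700_000_000_000, "@alice:example.com");
    assert_eq!(
        decode_openid_value(&value),
        Some((1_700_000_000_000, "@alice:example.com"))
    );
}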
@@ -6,6 +6,7 @@ use crate::{
     SERVICES,
 };
 use abstraction::{KeyValueDatabaseEngine, KvTree};
+use base64::{engine::general_purpose, Engine};
 use directories::ProjectDirs;
 use lru_cache::LruCache;

@@ -57,6 +58,7 @@ pub struct KeyValueDatabase {
     pub(super) userid_masterkeyid: Arc<dyn KvTree>,
     pub(super) userid_selfsigningkeyid: Arc<dyn KvTree>,
     pub(super) userid_usersigningkeyid: Arc<dyn KvTree>,
+    pub(super) openidtoken_expiresatuserid: Arc<dyn KvTree>, // expiresatuserid = expiresat + userid

     pub(super) userfilterid_filter: Arc<dyn KvTree>, // UserFilterId = UserId + FilterId

@@ -100,6 +102,8 @@ pub struct KeyValueDatabase {
     pub(super) userroomid_leftstate: Arc<dyn KvTree>,
     pub(super) roomuserid_leftcount: Arc<dyn KvTree>,

+    pub(super) alias_userid: Arc<dyn KvTree>, // User who created the alias
+
     pub(super) disabledroomids: Arc<dyn KvTree>, // Rooms where incoming federation handling is disabled

     pub(super) lazyloadedids: Arc<dyn KvTree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId
@@ -290,6 +294,7 @@ impl KeyValueDatabase {
             userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
             userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
             userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
+            openidtoken_expiresatuserid: builder.open_tree("openidtoken_expiresatuserid")?,
             userfilterid_filter: builder.open_tree("userfilterid_filter")?,
             todeviceid_events: builder.open_tree("todeviceid_events")?,

@@ -325,6 +330,8 @@ impl KeyValueDatabase {
             userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
             roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,

+            alias_userid: builder.open_tree("alias_userid")?,
+
             disabledroomids: builder.open_tree("disabledroomids")?,

             lazyloadedids: builder.open_tree("lazyloadedids")?,
@@ -404,11 +411,9 @@
         // Matrix resource ownership is based on the server name; changing it
         // requires recreating the database from scratch.
         if services().users.count()? > 0 {
-            let conduit_user =
-                UserId::parse_with_server_name("conduit", services().globals.server_name())
-                    .expect("@conduit:server_name is valid");
+            let conduit_user = services().globals.server_user();

-            if !services().users.exists(&conduit_user)? {
+            if !services().users.exists(conduit_user)? {
                 error!(
                     "The {} server user does not exist, and the database is not new.",
                     conduit_user
@@ -420,7 +425,7 @@
         }

         // If the database has any data, perform data migrations before starting
-        let latest_database_version = 13;
+        let latest_database_version = 16;

         if services().users.count()? > 0 {
             // MIGRATIONS
@@ -937,6 +942,86 @@
                 warn!("Migration: 12 -> 13 finished");
             }

+            if services().globals.database_version()? < 16 {
+                // Reconstruct all media using the filesystem
+                db.mediaid_file.clear().unwrap();
+
+                for file in fs::read_dir(services().globals.get_media_folder()).unwrap() {
+                    let file = file.unwrap();
+                    let mediaid = general_purpose::URL_SAFE_NO_PAD
+                        .decode(file.file_name().into_string().unwrap())
+                        .unwrap();
+
+                    let mut parts = mediaid.rsplit(|&b| b == 0xff);
+
+                    let mut removed_bytes = 0;
+
+                    let content_type_bytes = parts.next().unwrap();
+                    removed_bytes += content_type_bytes.len() + 1;
+
+                    let content_disposition_bytes = parts
+                        .next()
+                        .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
+                    removed_bytes += content_disposition_bytes.len();
+
+                    let mut content_disposition =
+                        utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
+                            Error::bad_database("Content Disposition in mediaid_file is invalid.")
+                        })?;
+
+                    if content_disposition.contains("filename=")
+                        && !content_disposition.contains("filename=\"")
+                    {
+                        println!("{}", &content_disposition);
+                        content_disposition =
+                            content_disposition.replacen("filename=", "filename=\"", 1);
+                        content_disposition.push('"');
+                        println!("{}", &content_disposition);
+
+                        let mut new_key = mediaid[..(mediaid.len() - removed_bytes)].to_vec();
+                        assert!(*new_key.last().unwrap() == 0xff);
+
+                        let mut shorter_key = new_key.clone();
+                        shorter_key.extend(
+                            ruma::http_headers::ContentDisposition::new(
+                                ruma::http_headers::ContentDispositionType::Inline,
+                            )
+                            .to_string()
+                            .as_bytes(),
+                        );
+                        shorter_key.push(0xff);
+                        shorter_key.extend_from_slice(content_type_bytes);
+
+                        new_key.extend_from_slice(content_disposition.to_string().as_bytes());
+                        new_key.push(0xff);
+                        new_key.extend_from_slice(content_type_bytes);
+
+                        // Some file names are too long. Ignore those.
+                        match fs::rename(
+                            services().globals.get_media_file(&mediaid),
+                            services().globals.get_media_file(&new_key),
+                        ) {
+                            Ok(_) => {
+                                db.mediaid_file.insert(&new_key, &[])?;
+                            }
+                            Err(_) => {
+                                fs::rename(
+                                    services().globals.get_media_file(&mediaid),
+                                    services().globals.get_media_file(&shorter_key),
+                                )
+                                .unwrap();
+                                db.mediaid_file.insert(&shorter_key, &[])?;
+                            }
+                        }
+                    } else {
+                        db.mediaid_file.insert(&mediaid, &[])?;
+                    }
+                }
+                services().globals.bump_database_version(16)?;
+
+                warn!("Migration: 13 -> 16 finished");
+            }
+
             assert_eq!(
                 services().globals.database_version().unwrap(),
                 latest_database_version
@@ -1102,22 +1187,21 @@

     /// Sets the emergency password and push rules for the @conduit account in case emergency password is set
     fn set_emergency_access() -> Result<bool> {
-        let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name())
-            .expect("@conduit:server_name is a valid UserId");
+        let conduit_user = services().globals.server_user();

         services().users.set_password(
-            &conduit_user,
+            conduit_user,
             services().globals.emergency_password().as_deref(),
         )?;

         let (ruleset, res) = match services().globals.emergency_password() {
-            Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)),
+            Some(_) => (Ruleset::server_default(conduit_user), Ok(true)),
             None => (Ruleset::new(), Ok(false)),
         };

         services().account_data.update(
             None,
-            &conduit_user,
+            conduit_user,
             GlobalAccountDataEventType::PushRules.to_string().into(),
             &serde_json::to_value(&GlobalAccountDataEvent {
                 content: PushRulesEventContent { global: ruleset },
52	src/main.rs
@@ -1,8 +1,10 @@
 use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};

 use axum::{
+    body::Body,
     extract::{DefaultBodyLimit, FromRequestParts, MatchedPath},
-    response::IntoResponse,
+    middleware::map_response,
+    response::{IntoResponse, Response},
     routing::{any, get, on, MethodFilter},
     Router,
 };
@@ -13,7 +15,7 @@ use figment::{
     Figment,
 };
 use http::{
-    header::{self, HeaderName},
+    header::{self, HeaderName, CONTENT_SECURITY_POLICY},
     Method, StatusCode, Uri,
 };
 use ruma::api::{
@@ -55,7 +57,7 @@ async fn main() {
         ))
         .nested(),
     )
-    .merge(Env::prefixed("CONDUIT_").global());
+    .merge(Env::prefixed("CONDUIT_").global().split("__"));

     let config = match raw_config.extract::<Config>() {
         Ok(s) => s,
@@ -68,11 +70,13 @@ async fn main() {
     config.warn_deprecated();

     if config.allow_jaeger {
-        opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
-        let tracer = opentelemetry_jaeger::new_agent_pipeline()
-            .with_auto_split_batch(true)
-            .with_service_name("conduit")
-            .install_batch(opentelemetry::runtime::Tokio)
+        opentelemetry::global::set_text_map_propagator(
+            opentelemetry_jaeger_propagator::Propagator::new(),
+        );
+        let tracer = opentelemetry_otlp::new_pipeline()
+            .tracing()
+            .with_exporter(opentelemetry_otlp::new_exporter().tonic())
+            .install_batch(opentelemetry_sdk::runtime::Tokio)
             .unwrap();
         let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);

@@ -141,6 +145,13 @@ async fn main() {
         }
     }

+/// Adds additional headers to prevent any potential XSS attacks via the media repo
+async fn set_csp_header(response: Response) -> impl IntoResponse {
+    (
+        [(CONTENT_SECURITY_POLICY, "sandbox; default-src 'none'; script-src 'none'; plugin-types application/pdf; style-src 'unsafe-inline'; object-src 'self';")], response
+    )
+}
+
 async fn run_server() -> io::Result<()> {
     let config = &services().globals.config;
     let addr = SocketAddr::from((config.address, config.port));
@@ -181,6 +192,7 @@ async fn run_server() -> io::Result<()> {
                 ])
                 .max_age(Duration::from_secs(86400)),
         )
+        .layer(map_response(set_csp_header))
        .layer(DefaultBodyLimit::max(
            config
                .max_request_size
@@ -216,10 +228,10 @@ async fn run_server() -> io::Result<()> {
     Ok(())
 }

-async fn spawn_task<B: Send + 'static>(
-    req: http::Request<B>,
-    next: axum::middleware::Next<B>,
-) -> std::result::Result<axum::response::Response, StatusCode> {
+async fn spawn_task(
+    req: http::Request<Body>,
+    next: axum::middleware::Next,
+) -> std::result::Result<Response, StatusCode> {
     if services().globals.shutdown.load(atomic::Ordering::Relaxed) {
         return Err(StatusCode::SERVICE_UNAVAILABLE);
     }
@@ -228,10 +240,10 @@ async fn spawn_task<B: Send + 'static>(
         .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
 }

-async fn unrecognized_method<B: Send>(
-    req: http::Request<B>,
-    next: axum::middleware::Next<B>,
-) -> std::result::Result<axum::response::Response, StatusCode> {
+async fn unrecognized_method(
+    req: http::Request<Body>,
+    next: axum::middleware::Next,
+) -> std::result::Result<Response, StatusCode> {
     let method = req.method().clone();
     let uri = req.uri().clone();
     let inner = next.run(req).await;
@@ -277,6 +289,7 @@ fn routes(config: &Config) -> Router {
         .ruma_route(client_server::get_room_aliases_route)
         .ruma_route(client_server::get_filter_route)
         .ruma_route(client_server::create_filter_route)
+        .ruma_route(client_server::create_openid_token_route)
         .ruma_route(client_server::set_global_account_data_route)
         .ruma_route(client_server::set_room_account_data_route)
         .ruma_route(client_server::get_global_account_data_route)
@@ -366,10 +379,14 @@ fn routes(config: &Config) -> Router {
         .ruma_route(client_server::turn_server_route)
         .ruma_route(client_server::send_event_to_device_route)
         .ruma_route(client_server::get_media_config_route)
+        .ruma_route(client_server::get_media_config_auth_route)
         .ruma_route(client_server::create_content_route)
         .ruma_route(client_server::get_content_route)
+        .ruma_route(client_server::get_content_auth_route)
         .ruma_route(client_server::get_content_as_filename_route)
+        .ruma_route(client_server::get_content_as_filename_auth_route)
         .ruma_route(client_server::get_content_thumbnail_route)
+        .ruma_route(client_server::get_content_thumbnail_auth_route)
         .ruma_route(client_server::get_devices_route)
         .ruma_route(client_server::get_device_route)
         .ruma_route(client_server::update_device_route)
@@ -427,10 +444,13 @@ fn routes(config: &Config) -> Router {
             .ruma_route(server_server::create_join_event_v2_route)
             .ruma_route(server_server::create_invite_route)
             .ruma_route(server_server::get_devices_route)
+            .ruma_route(server_server::get_content_route)
+            .ruma_route(server_server::get_content_thumbnail_route)
             .ruma_route(server_server::get_room_information_route)
             .ruma_route(server_server::get_profile_information_route)
             .ruma_route(server_server::get_keys_route)
             .ruma_route(server_server::claim_keys_route)
+            .ruma_route(server_server::get_openid_userinfo_route)
             .ruma_route(server_server::well_known_server)
     } else {
         router
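The `set_csp_header` middleware added above attaches a restrictive Content-Security-Policy to every response, so media served by the homeserver cannot run scripts. As a rough standalone illustration of the same pattern (not taken from this diff; crate versions, the route, and the exact policy string are assumptions), an axum `map_response` layer can rewrite outgoing headers like this:

```rust
use axum::{middleware::map_response, response::Response, routing::get, Router};
use http::{header::CONTENT_SECURITY_POLICY, HeaderValue};

// Add a restrictive CSP header to every outgoing response.
async fn set_csp_header(mut response: Response) -> Response {
    response.headers_mut().insert(
        CONTENT_SECURITY_POLICY,
        HeaderValue::from_static("sandbox; default-src 'none'; script-src 'none';"),
    );
    response
}

fn app() -> Router {
    Router::new()
        .route("/", get(|| async { "hello" }))
        // map_response runs on the way out, so every handler's response gets the header.
        .layer(map_response(set_csp_header))
}
```

Because the layer wraps the whole router, the header is applied uniformly, which is why the diff adds it as a `.layer(...)` next to the CORS and body-limit layers rather than inside individual media handlers.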
@@ -1,9 +1,4 @@
-use std::{
-    collections::BTreeMap,
-    convert::{TryFrom, TryInto},
-    sync::Arc,
-    time::Instant,
-};
+use std::{collections::BTreeMap, convert::TryFrom, sync::Arc, time::Instant};

 use clap::Parser;
 use regex::Regex;
@@ -24,7 +19,8 @@ use ruma::{
         },
         TimelineEventType,
     },
-    EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
+    EventId, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId,
+    RoomVersionId, ServerName, UserId,
 };
 use serde_json::value::to_raw_value;
 use tokio::sync::{mpsc, Mutex, RwLock};
@@ -77,6 +73,12 @@ enum AdminCommand {
     /// List all rooms we are currently handling an incoming pdu from
     IncomingFederation,

+    /// Removes an alias from the server
+    RemoveAlias {
+        /// The alias to be removed
+        alias: Box<RoomAliasId>,
+    },
+
     /// Deactivate a user
     ///
     /// User will not be removed from all rooms by default.
@@ -160,24 +162,23 @@ enum AdminCommand {
         password: Option<String>,
     },

+    /// Temporarily toggle user registration by passing either true or false as an argument, does not persist between restarts
+    AllowRegistration { status: Option<bool> },
+
     /// Disables incoming federation handling for a room.
     DisableRoom { room_id: Box<RoomId> },
     /// Enables incoming federation handling for a room again.
     EnableRoom { room_id: Box<RoomId> },

-    /// Verify json signatures
-    /// [commandbody]()
-    /// # ```
-    /// # json here
-    /// # ```
+    /// Sign a json object using Conduit's signing keys, putting the json in a codeblock
     SignJson,

-    /// Verify json signatures
-    /// [commandbody]()
-    /// # ```
-    /// # json here
-    /// # ```
+    /// Verify json signatures, putting the json in a codeblock
     VerifyJson,
+
+    /// Parses a JSON object as an event then creates a hash and signs it, putting a room
+    /// version as an argument, and the json in a codeblock
+    HashAndSignEvent { room_version_id: RoomVersionId },
 }

 #[derive(Debug)]
@@ -212,8 +213,7 @@ impl Service {
         // TODO: Use futures when we have long admin commands
         //let mut futures = FuturesUnordered::new();

-        let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
-            .expect("@conduit:server_name is valid");
+        let conduit_user = services().globals.server_user();

         if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
             loop {
@@ -246,8 +246,9 @@ impl Service {
                             unsigned: None,
                             state_key: None,
                             redacts: None,
+                            timestamp: None,
                         },
-                        &conduit_user,
+                        conduit_user,
                         &conduit_room,
                         &state_lock,
                     )
@@ -660,6 +661,24 @@ impl Service {
                     "Created user with user_id: {user_id} and password: {password}"
                 ))
            }
+            AdminCommand::AllowRegistration { status } => {
+                if let Some(status) = status {
+                    services().globals.set_registration(status).await;
+                    RoomMessageEventContent::text_plain(if status {
+                        "Registration is now enabled"
+                    } else {
+                        "Registration is now disabled"
+                    })
+                } else {
+                    RoomMessageEventContent::text_plain(
+                        if services().globals.allow_registration().await {
+                            "Registration is currently enabled"
+                        } else {
+                            "Registration is currently disabled"
+                        },
+                    )
+                }
+            }
            AdminCommand::DisableRoom { room_id } => {
                services().rooms.metadata.disable_room(&room_id, true)?;
                RoomMessageEventContent::text_plain("Room disabled.")
@@ -841,15 +860,46 @@ impl Service {
                        services()
                            .rooms
                            .event_handler
+                            // Generally we shouldn't be checking against expired keys unless required, so in the admin
+                            // room it might be best to not allow expired keys
                            .fetch_required_signing_keys(&value, &pub_key_map)
                            .await?;

-                        let pub_key_map = pub_key_map.read().await;
-                        match ruma::signatures::verify_json(&pub_key_map, &value) {
-                            Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
-                            Err(e) => RoomMessageEventContent::text_plain(format!(
+                        let mut expired_key_map = BTreeMap::new();
+                        let mut valid_key_map = BTreeMap::new();
+
+                        for (server, keys) in pub_key_map.into_inner().into_iter() {
+                            if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
+                                valid_key_map.insert(
+                                    server,
+                                    keys.verify_keys
+                                        .into_iter()
+                                        .map(|(id, key)| (id, key.key))
+                                        .collect(),
+                                );
+                            } else {
+                                expired_key_map.insert(
+                                    server,
+                                    keys.verify_keys
+                                        .into_iter()
+                                        .map(|(id, key)| (id, key.key))
+                                        .collect(),
+                                );
+                            }
+                        }
+
+                        if ruma::signatures::verify_json(&valid_key_map, &value).is_ok() {
+                            RoomMessageEventContent::text_plain("Signature correct")
+                        } else if let Err(e) =
+                            ruma::signatures::verify_json(&expired_key_map, &value)
+                        {
+                            RoomMessageEventContent::text_plain(format!(
                                "Signature verification failed: {e}"
-                            )),
+                            ))
+                        } else {
+                            RoomMessageEventContent::text_plain(
+                                "Signature correct (with expired keys)",
+                            )
                        }
                    }
                    Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
@@ -860,6 +910,61 @@ impl Service {
                    )
                }
            }
+            AdminCommand::HashAndSignEvent { room_version_id } => {
+                if body.len() > 2
+                    // Language may be specified as part of the codeblock (e.g. "```json")
+                    && body[0].trim().starts_with("```")
+                    && body.last().unwrap().trim() == "```"
+                {
+                    let string = body[1..body.len() - 1].join("\n");
+                    match serde_json::from_str(&string) {
+                        Ok(mut value) => {
+                            if let Err(e) = ruma::signatures::hash_and_sign_event(
+                                services().globals.server_name().as_str(),
+                                services().globals.keypair(),
+                                &mut value,
+                                &room_version_id,
+                            ) {
+                                RoomMessageEventContent::text_plain(format!("Invalid event: {e}"))
+                            } else {
+                                let json_text = serde_json::to_string_pretty(&value)
+                                    .expect("canonical json is valid json");
+                                RoomMessageEventContent::text_plain(json_text)
+                            }
+                        }
+                        Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
+                    }
+                } else {
+                    RoomMessageEventContent::text_plain(
+                        "Expected code block in command body. Add --help for details.",
+                    )
+                }
+            }
+            AdminCommand::RemoveAlias { alias } => {
+                if alias.server_name() != services().globals.server_name() {
+                    RoomMessageEventContent::text_plain(
+                        "Cannot remove alias which is not from this server",
+                    )
+                } else if services()
+                    .rooms
+                    .alias
+                    .resolve_local_alias(&alias)?
+                    .is_none()
+                {
+                    RoomMessageEventContent::text_plain("No such alias exists")
+                } else {
+                    // We execute this as the server user for two reasons
+                    // 1. If the user can execute commands in the admin room, they can always remove the alias.
+                    // 2. In the future, we are likely going to be able to allow users to execute commands via
+                    // other methods, such as IPC, which would lead to us not knowing their user id
+
+                    services()
+                        .rooms
+                        .alias
+                        .remove_alias(&alias, services().globals.server_user())?;
+                    RoomMessageEventContent::text_plain("Alias removed sucessfully")
+                }
+            }
        };

        Ok(reply_message_content)
@@ -967,11 +1072,9 @@ impl Service {
        let state_lock = mutex_state.lock().await;

        // Create a user for the server
-        let conduit_user =
-            UserId::parse_with_server_name("conduit", services().globals.server_name())
-                .expect("@conduit:server_name is valid");
+        let conduit_user = services().globals.server_user();

-        services().users.create(&conduit_user, None)?;
+        services().users.create(conduit_user, None)?;

        let room_version = services().globals.default_room_version();
        let mut content = match room_version {
@@ -984,7 +1087,7 @@ impl Service {
            | RoomVersionId::V7
            | RoomVersionId::V8
            | RoomVersionId::V9
-            | RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.clone()),
+            | RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.to_owned()),
            RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
            _ => unreachable!("Validity of room version already checked"),
        };
@@ -1003,8 +1106,9 @@ impl Service {
                unsigned: None,
                state_key: Some("".to_owned()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
@@ -1031,8 +1135,9 @@ impl Service {
                unsigned: None,
                state_key: Some(conduit_user.to_string()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
@@ -1040,7 +1145,7 @@ impl Service {

        // 3. Power levels
        let mut users = BTreeMap::new();
-        users.insert(conduit_user.clone(), 100.into());
+        users.insert(conduit_user.to_owned(), 100.into());

        services()
            .rooms
@@ -1056,8 +1161,9 @@ impl Service {
                unsigned: None,
                state_key: Some("".to_owned()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
@@ -1075,8 +1181,9 @@ impl Service {
                unsigned: None,
                state_key: Some("".to_owned()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
@@ -1096,8 +1203,9 @@ impl Service {
                unsigned: None,
                state_key: Some("".to_owned()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
@@ -1117,8 +1225,9 @@ impl Service {
                unsigned: None,
                state_key: Some("".to_owned()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
@@ -1137,8 +1246,9 @@ impl Service {
                unsigned: None,
                state_key: Some("".to_owned()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
@@ -1157,17 +1267,16 @@ impl Service {
                unsigned: None,
                state_key: Some("".to_owned()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
        .await?;

        // 6. Room alias
-        let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
-            .try_into()
-            .expect("#admins:server_name is a valid alias name");
+        let alias: OwnedRoomAliasId = services().globals.admin_alias().to_owned();

        services()
            .rooms
@@ -1183,14 +1292,18 @@ impl Service {
                unsigned: None,
                state_key: Some("".to_owned()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
        .await?;

-        services().rooms.alias.set_alias(&alias, &room_id)?;
+        services()
+            .rooms
+            .alias
+            .set_alias(&alias, &room_id, conduit_user)?;

        Ok(())
    }
@@ -1199,15 +1312,10 @@ impl Service {
    ///
    /// Errors are propagated from the database, and will have None if there is no admin room
    pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
-        let admin_room_alias: Box<RoomAliasId> =
-            format!("#admins:{}", services().globals.server_name())
-                .try_into()
-                .expect("#admins:server_name is a valid alias name");
-
        services()
            .rooms
            .alias
-            .resolve_local_alias(&admin_room_alias)
+            .resolve_local_alias(services().globals.admin_alias())
    }

    /// Invite the user to the conduit admin room.
@@ -1231,9 +1339,7 @@ impl Service {
        let state_lock = mutex_state.lock().await;

        // Use the server user to grant the new admin's power level
-        let conduit_user =
-            UserId::parse_with_server_name("conduit", services().globals.server_name())
-                .expect("@conduit:server_name is valid");
+        let conduit_user = services().globals.server_user();

        // Invite and join the real user
        services()
@@ -1256,8 +1362,9 @@ impl Service {
                unsigned: None,
                state_key: Some(user_id.to_string()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
@@ -1282,6 +1389,7 @@ impl Service {
                unsigned: None,
                state_key: Some(user_id.to_string()),
                redacts: None,
+                timestamp: None,
            },
            user_id,
            &room_id,
@@ -1308,8 +1416,9 @@ impl Service {
                unsigned: None,
                state_key: Some("".to_owned()),
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        )
@@ -1327,14 +1436,24 @@ impl Service {
                unsigned: None,
                state_key: None,
                redacts: None,
+                timestamp: None,
            },
-            &conduit_user,
+            conduit_user,
            &room_id,
            &state_lock,
        ).await?;
        }
        Ok(())
    }
+
+    /// Checks whether a given user is an admin of this server
+    pub fn user_is_admin(&self, user_id: &UserId) -> Result<bool> {
+        let Some(admin_room) = self.get_admin_room()? else {
+            return Ok(false);
+        };
+
+        services().rooms.state_cache.is_joined(user_id, &admin_room)
+    }
 }

 #[cfg(test)]
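The new `AllowRegistration` admin command works together with the `allow_registration: RwLock<bool>` field added to the globals service further down: the flag is seeded from the config file and can be flipped at runtime without being persisted. A minimal sketch of that idea, with assumed names rather than the actual service types from this diff:

```rust
use tokio::sync::RwLock;

/// Runtime-toggleable registration flag (illustrative stand-in for the service struct).
struct Globals {
    allow_registration: RwLock<bool>,
}

impl Globals {
    async fn set_registration(&self, status: bool) {
        *self.allow_registration.write().await = status;
    }

    async fn allow_registration(&self) -> bool {
        *self.allow_registration.read().await
    }
}

#[tokio::main]
async fn main() {
    let globals = Globals { allow_registration: RwLock::new(true) };
    globals.set_registration(false).await;
    // The change is visible immediately, but is lost on restart.
    assert!(!globals.allow_registration().await);
}
```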
@@ -1,13 +1,71 @@
-use std::collections::BTreeMap;
+use std::{
+    collections::BTreeMap,
-use async_trait::async_trait;
+    time::{Duration, SystemTime},
-use ruma::{
-    api::federation::discovery::{ServerSigningKeys, VerifyKey},
-    signatures::Ed25519KeyPair,
-    DeviceId, OwnedServerSigningKeyId, ServerName, UserId,
 };

-use crate::Result;
+use crate::{services, Result};
+use async_trait::async_trait;
+use ruma::{
+    api::federation::discovery::{OldVerifyKey, ServerSigningKeys, VerifyKey},
+    serde::Base64,
+    signatures::Ed25519KeyPair,
+    DeviceId, MilliSecondsSinceUnixEpoch, ServerName, UserId,
+};
+use serde::Deserialize;
+
+/// Similar to ServerSigningKeys, but drops a few unnecessary fields we don't require post-validation
+#[derive(Deserialize, Debug, Clone)]
+pub struct SigningKeys {
+    pub verify_keys: BTreeMap<String, VerifyKey>,
+    pub old_verify_keys: BTreeMap<String, OldVerifyKey>,
+    pub valid_until_ts: MilliSecondsSinceUnixEpoch,
+}
+
+impl SigningKeys {
+    /// Creates the SigningKeys struct, using the keys of the current server
+    pub fn load_own_keys() -> Self {
+        let mut keys = Self {
+            verify_keys: BTreeMap::new(),
+            old_verify_keys: BTreeMap::new(),
+            valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
+                SystemTime::now() + Duration::from_secs(7 * 86400),
+            )
+            .expect("Should be valid until year 500,000,000"),
+        };
+
+        keys.verify_keys.insert(
+            format!("ed25519:{}", services().globals.keypair().version()),
+            VerifyKey {
+                key: Base64::new(services().globals.keypair.public_key().to_vec()),
+            },
+        );
+
+        keys
+    }
+}
+
+impl From<ServerSigningKeys> for SigningKeys {
+    fn from(value: ServerSigningKeys) -> Self {
+        let ServerSigningKeys {
+            verify_keys,
+            old_verify_keys,
+            valid_until_ts,
+            ..
+        } = value;
+
+        Self {
+            verify_keys: verify_keys
+                .into_iter()
+                .map(|(id, key)| (id.to_string(), key))
+                .collect(),
+            old_verify_keys: old_verify_keys
+                .into_iter()
+                .map(|(id, key)| (id.to_string(), key))
+                .collect(),
+            valid_until_ts,
+        }
+    }
+}

 #[async_trait]
 pub trait Data: Send + Sync {
@@ -21,17 +79,23 @@ pub trait Data: Send + Sync {
     fn clear_caches(&self, amount: u32);
     fn load_keypair(&self) -> Result<Ed25519KeyPair>;
     fn remove_keypair(&self) -> Result<()>;
-    fn add_signing_key(
+    /// Only extends the cached keys, not moving any verify_keys to old_verify_keys, as if we suddenly
+    /// recieve requests from the origin server, we want to be able to accept requests from them
+    fn add_signing_key_from_trusted_server(
         &self,
         origin: &ServerName,
         new_keys: ServerSigningKeys,
-    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
+    ) -> Result<SigningKeys>;
+    /// Extends cached keys, as well as moving verify_keys that are not present in these new keys to
-    /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
+    /// old_verify_keys, so that potnetially comprimised keys cannot be used to make requests
-    fn signing_keys_for(
+    fn add_signing_key_from_origin(
         &self,
         origin: &ServerName,
-    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
+        new_keys: ServerSigningKeys,
+    ) -> Result<SigningKeys>;
+
+    /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
+    fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>>;
     fn database_version(&self) -> Result<u64>;
     fn bump_database_version(&self, new_version: u64) -> Result<()>;
 }
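The split between `add_signing_key_from_trusted_server` and `add_signing_key_from_origin` above comes down to what happens to keys the origin no longer advertises: the trusted-server path only extends the cache, while the origin path retires missing keys. A simplified, std-only sketch of the "from origin" rule (plain strings stand in for the ruma key types, so this is illustrative only):

```rust
use std::collections::BTreeMap;

struct Keys {
    verify_keys: BTreeMap<String, String>,
    old_verify_keys: BTreeMap<String, String>,
}

fn merge_from_origin(cached: &mut Keys, new_keys: BTreeMap<String, String>) {
    // Keys the origin stopped advertising are retired, not silently kept active.
    let retired: Vec<String> = cached
        .verify_keys
        .keys()
        .filter(|id| !new_keys.contains_key(*id))
        .cloned()
        .collect();
    for id in retired {
        if let Some(key) = cached.verify_keys.remove(&id) {
            cached.old_verify_keys.insert(id, key);
        }
    }
    // The freshly advertised keys extend / replace the active set.
    cached.verify_keys.extend(new_keys);
}
```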
@@ -1,25 +1,19 @@
 mod data;
-pub use data::Data;
+pub use data::{Data, SigningKeys};
 use ruma::{
-    serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName,
-    OwnedServerSigningKeyId, OwnedUserId,
+    serde::Base64, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedEventId, OwnedRoomAliasId,
+    OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId,
 };

-use crate::api::server_server::FedDest;
+use crate::api::server_server::DestinationResponse;

 use crate::{services, Config, Error, Result};
 use futures_util::FutureExt;
 use hickory_resolver::TokioAsyncResolver;
-use hyper::{
-    client::connect::dns::{GaiResolver, Name},
-    service::Service as HyperService,
-};
-use reqwest::dns::{Addrs, Resolve, Resolving};
+use hyper_util::client::legacy::connect::dns::{GaiResolver, Name as HyperName};
+use reqwest::dns::{Addrs, Name, Resolve, Resolving};
 use ruma::{
-    api::{
-        client::sync::sync_events,
-        federation::discovery::{ServerSigningKeys, VerifyKey},
-    },
+    api::{client::sync::sync_events, federation::discovery::ServerSigningKeys},
     DeviceId, RoomVersionId, ServerName, UserId,
 };
 use std::{
@@ -30,6 +24,7 @@ use std::{
     iter,
     net::{IpAddr, SocketAddr},
     path::PathBuf,
+    str::FromStr,
     sync::{
         atomic::{self, AtomicBool},
         Arc, RwLock as StdRwLock,
@@ -37,11 +32,12 @@ use std::{
     time::{Duration, Instant},
 };
 use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore};
+use tower_service::Service as TowerService;
 use tracing::{error, info};

 use base64::{engine::general_purpose, Engine as _};

-type WellKnownMap = HashMap<OwnedServerName, (FedDest, String)>;
+type WellKnownMap = HashMap<OwnedServerName, DestinationResponse>;
 type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
 type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
 type SyncHandle = (
@@ -55,6 +51,7 @@ pub struct Service {
     pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
     pub tls_name_override: Arc<StdRwLock<TlsNameMap>>,
     pub config: Config,
+    allow_registration: RwLock<bool>,
     keypair: Arc<ruma::signatures::Ed25519KeyPair>,
     dns_resolver: TokioAsyncResolver,
     jwt_decoding_key: Option<jsonwebtoken::DecodingKey>,
@@ -71,6 +68,8 @@ pub struct Service {
     pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
     pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
     pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
+    server_user: OwnedUserId,
+    admin_alias: OwnedRoomAliasId,
     pub stateres_mutex: Arc<Mutex<()>>,
     pub rotate: RotationHandler,
@@ -137,11 +136,19 @@ impl Resolve for Resolver {
             })
             .unwrap_or_else(|| {
                 let this = &mut self.inner.clone();
-                Box::pin(HyperService::<Name>::call(this, name).map(|result| {
-                    result
-                        .map(|addrs| -> Addrs { Box::new(addrs) })
-                        .map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
-                }))
+                Box::pin(
+                    TowerService::<HyperName>::call(
+                        this,
+                        // Beautiful hack, please remove this in the future.
+                        HyperName::from_str(name.as_str())
+                            .expect("reqwest Name is just wrapper for hyper-util Name"),
+                    )
+                    .map(|result| {
+                        result
+                            .map(|addrs| -> Addrs { Box::new(addrs) })
+                            .map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
+                    }),
+                )
             })
     }
 }
@@ -184,6 +191,11 @@ impl Service {
         let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];

         let mut s = Self {
+            allow_registration: RwLock::new(config.allow_registration),
+            admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name))
+                .expect("#admins:server_name is a valid alias name"),
+            server_user: UserId::parse(format!("@conduit:{}", &config.server_name))
+                .expect("@conduit:server_name is valid"),
             db,
             config,
             keypair: Arc::new(keypair),
@@ -277,6 +289,14 @@ impl Service {
         self.config.server_name.as_ref()
     }

+    pub fn server_user(&self) -> &UserId {
+        self.server_user.as_ref()
+    }
+
+    pub fn admin_alias(&self) -> &RoomAliasId {
+        self.admin_alias.as_ref()
+    }
+
     pub fn max_request_size(&self) -> u32 {
         self.config.max_request_size
     }
@@ -285,8 +305,15 @@ impl Service {
         self.config.max_fetch_prev_events
     }

-    pub fn allow_registration(&self) -> bool {
-        self.config.allow_registration
+    /// Allows for the temporary (non-persistant) toggling of registration
+    pub async fn set_registration(&self, status: bool) {
+        let mut lock = self.allow_registration.write().await;
+        *lock = status;
+    }
+
+    /// Checks whether user registration is allowed
+    pub async fn allow_registration(&self) -> bool {
+        *self.allow_registration.read().await
     }

     pub fn allow_encryption(&self) -> bool {
@@ -362,36 +389,89 @@ impl Service {
         room_versions
     }

-    /// TODO: the key valid until timestamp is only honored in room version > 4
-    /// Remove the outdated keys and insert the new ones.
-    ///
     /// This doesn't actually check that the keys provided are newer than the old set.
-    pub fn add_signing_key(
+    pub fn add_signing_key_from_trusted_server(
         &self,
         origin: &ServerName,
         new_keys: ServerSigningKeys,
-    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
-        self.db.add_signing_key(origin, new_keys)
+    ) -> Result<SigningKeys> {
+        self.db
+            .add_signing_key_from_trusted_server(origin, new_keys)
     }

-    /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
-    pub fn signing_keys_for(
+    /// Same as from_trusted_server, except it will move active keys not present in `new_keys` to old_signing_keys
+    pub fn add_signing_key_from_origin(
         &self,
         origin: &ServerName,
-    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
-        let mut keys = self.db.signing_keys_for(origin)?;
-        if origin == self.server_name() {
-            keys.insert(
-                format!("ed25519:{}", services().globals.keypair().version())
-                    .try_into()
-                    .expect("found invalid server signing keys in DB"),
-                VerifyKey {
-                    key: Base64::new(self.keypair.public_key().to_vec()),
-                },
-            );
-        }
+        new_keys: ServerSigningKeys,
+    ) -> Result<SigningKeys> {
+        self.db.add_signing_key_from_origin(origin, new_keys)
+    }

-        Ok(keys)
+    /// This returns Ok(None) when there are no keys found for the server.
+    pub fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> {
+        Ok(self.db.signing_keys_for(origin)?.or_else(|| {
+            if origin == self.server_name() {
+                Some(SigningKeys::load_own_keys())
+            } else {
+                None
+            }
+        }))
+    }
+
+    /// Filters the key map of multiple servers down to keys that should be accepted given the expiry time,
+    /// room version, and timestamp of the paramters
+    pub fn filter_keys_server_map(
+        &self,
+        keys: BTreeMap<String, SigningKeys>,
+        timestamp: MilliSecondsSinceUnixEpoch,
+        room_version_id: &RoomVersionId,
+    ) -> BTreeMap<String, BTreeMap<String, Base64>> {
+        keys.into_iter()
+            .filter_map(|(server, keys)| {
+                self.filter_keys_single_server(keys, timestamp, room_version_id)
+                    .map(|keys| (server, keys))
+            })
+            .collect()
+    }
+
+    /// Filters the keys of a single server down to keys that should be accepted given the expiry time,
+    /// room version, and timestamp of the paramters
+    pub fn filter_keys_single_server(
+        &self,
+        keys: SigningKeys,
+        timestamp: MilliSecondsSinceUnixEpoch,
+        room_version_id: &RoomVersionId,
+    ) -> Option<BTreeMap<String, Base64>> {
+        if keys.valid_until_ts > timestamp
+            // valid_until_ts MUST be ignored in room versions 1, 2, 3, and 4.
+            // https://spec.matrix.org/v1.10/server-server-api/#get_matrixkeyv2server
+            || matches!(room_version_id, RoomVersionId::V1
+                | RoomVersionId::V2
+                | RoomVersionId::V4
+                | RoomVersionId::V3)
+        {
+            // Given that either the room version allows stale keys, or the valid_until_ts is
+            // in the future, all verify_keys are valid
+            let mut map: BTreeMap<_, _> = keys
+                .verify_keys
+                .into_iter()
+                .map(|(id, key)| (id, key.key))
+                .collect();
+
+            map.extend(keys.old_verify_keys.into_iter().filter_map(|(id, key)| {
+                // Even on old room versions, we don't allow old keys if they are expired
+                if key.expired_ts > timestamp {
+                    Some((id, key.key))
+                } else {
+                    None
+                }
+            }));
+
+            Some(map)
+        } else {
+            None
+        }
     }

     pub fn database_version(&self) -> Result<u64> {
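`filter_keys_single_server` encodes the acceptance rule: active keys are usable while `valid_until_ts` lies in the future (or the room version predates expiry checks), and old keys only until their own `expired_ts`. A reduced sketch of the same rule, with plain `u64` millisecond timestamps in place of `MilliSecondsSinceUnixEpoch` (names and types here are assumptions for illustration):

```rust
use std::collections::BTreeMap;

struct Keys {
    valid_until_ts: u64,
    verify_keys: BTreeMap<String, String>,
    old_verify_keys: BTreeMap<String, (u64, String)>, // (expired_ts, key)
}

/// `stale_ok` corresponds to room versions 1-4, which ignore valid_until_ts.
fn usable_keys(keys: Keys, timestamp: u64, stale_ok: bool) -> Option<BTreeMap<String, String>> {
    if keys.valid_until_ts > timestamp || stale_ok {
        let mut map = keys.verify_keys;
        map.extend(keys.old_verify_keys.into_iter().filter_map(|(id, (expired_ts, key))| {
            // Old keys are only accepted while their own expiry is still in the future.
            if expired_ts > timestamp {
                Some((id, key))
            } else {
                None
            }
        }));
        Some(map)
    } else {
        // No key of this server may be used to verify the event.
        None
    }
}
```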
@@ -1,3 +1,5 @@
+use ruma::http_headers::ContentDisposition;
+
 use crate::Result;

 pub trait Data: Send + Sync {
@@ -6,7 +8,7 @@ pub trait Data: Send + Sync {
         mxc: String,
         width: u32,
         height: u32,
-        content_disposition: Option<&str>,
+        content_disposition: &ContentDisposition,
         content_type: Option<&str>,
     ) -> Result<Vec<u8>>;

@@ -16,5 +18,5 @@ pub trait Data: Send + Sync {
         mxc: String,
         width: u32,
         height: u32,
-    ) -> Result<(Option<String>, Option<String>, Vec<u8>)>;
+    ) -> Result<(ContentDisposition, Option<String>, Vec<u8>)>;
 }
@@ -2,6 +2,7 @@ mod data;
 use std::io::Cursor;

 pub use data::Data;
+use ruma::http_headers::{ContentDisposition, ContentDispositionType};

 use crate::{services, Result};
 use image::imageops::FilterType;
@@ -12,7 +13,7 @@ use tokio::{
 };

 pub struct FileMeta {
-    pub content_disposition: Option<String>,
+    pub content_disposition: ContentDisposition,
     pub content_type: Option<String>,
     pub file: Vec<u8>,
 }
@@ -26,14 +27,17 @@ impl Service {
     pub async fn create(
         &self,
         mxc: String,
-        content_disposition: Option<&str>,
+        content_disposition: Option<ContentDisposition>,
         content_type: Option<&str>,
         file: &[u8],
     ) -> Result<()> {
+        let content_disposition =
+            content_disposition.unwrap_or(ContentDisposition::new(ContentDispositionType::Inline));
+
         // Width, Height = 0 if it's not a thumbnail
         let key = self
             .db
-            .create_file_metadata(mxc, 0, 0, content_disposition, content_type)?;
+            .create_file_metadata(mxc, 0, 0, &content_disposition, content_type)?;

         let path = services().globals.get_media_file(&key);
         let mut f = File::create(path).await?;
@@ -46,15 +50,18 @@ impl Service {
     pub async fn upload_thumbnail(
         &self,
         mxc: String,
-        content_disposition: Option<&str>,
         content_type: Option<&str>,
         width: u32,
         height: u32,
         file: &[u8],
     ) -> Result<()> {
-        let key =
-            self.db
-                .create_file_metadata(mxc, width, height, content_disposition, content_type)?;
+        let key = self.db.create_file_metadata(
+            mxc,
+            width,
+            height,
+            &ContentDisposition::new(ContentDispositionType::Inline),
+            content_type,
+        )?;

         let path = services().globals.get_media_file(&key);
         let mut f = File::create(path).await?;
@@ -166,22 +173,20 @@ impl Service {
                     / u64::from(original_height)
             };
             if use_width {
-                if intermediate <= u64::from(::std::u32::MAX) {
+                if intermediate <= u64::from(u32::MAX) {
                     (width, intermediate as u32)
                 } else {
                     (
-                        (u64::from(width) * u64::from(::std::u32::MAX) / intermediate)
-                            as u32,
-                        ::std::u32::MAX,
+                        (u64::from(width) * u64::from(u32::MAX) / intermediate) as u32,
+                        u32::MAX,
                     )
                 }
-            } else if intermediate <= u64::from(::std::u32::MAX) {
+            } else if intermediate <= u64::from(u32::MAX) {
                 (intermediate as u32, height)
             } else {
                 (
-                    ::std::u32::MAX,
-                    (u64::from(height) * u64::from(::std::u32::MAX) / intermediate)
-                        as u32,
+                    u32::MAX,
+                    (u64::from(height) * u64::from(u32::MAX) / intermediate) as u32,
                 )
             }
         };
@@ -200,7 +205,7 @@ impl Service {
                 mxc,
                 width,
                 height,
-                content_disposition.as_deref(),
+                &content_disposition,
                 content_type.as_deref(),
             )?;
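The `u32::MAX` juggling in the thumbnail hunk above is an overflow guard: the scaled side is computed in `u64`, and if it would not fit in `u32`, both sides are rescaled so that the larger one saturates at `u32::MAX` while preserving the aspect ratio. A minimal sketch of one branch (fixed target width, derived height; the function name is made up for illustration and `original_width` is assumed to be non-zero):

```rust
/// Compute a thumbnail height for a fixed target `width`, clamping to u32 range.
fn scale_height(width: u32, original_width: u32, original_height: u32) -> (u32, u32) {
    let intermediate =
        u64::from(original_height) * u64::from(width) / u64::from(original_width);
    if intermediate <= u64::from(u32::MAX) {
        (width, intermediate as u32)
    } else {
        // Rescale both sides so the larger one saturates at u32::MAX.
        (
            (u64::from(width) * u64::from(u32::MAX) / intermediate) as u32,
            u32::MAX,
        )
    }
}
```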
@@ -1,5 +1,6 @@
 use crate::Error;
 use ruma::{
+    api::client::error::ErrorKind,
     canonical_json::redact_content_in_place,
     events::{
         room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent},
@@ -72,6 +73,23 @@ impl PduEvent {
         Ok(())
     }

+    pub fn is_redacted(&self) -> bool {
+        #[derive(Deserialize)]
+        struct ExtractRedactedBecause {
+            redacted_because: Option<serde::de::IgnoredAny>,
+        }
+
+        let Some(unsigned) = &self.unsigned else {
+            return false;
+        };
+
+        let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else {
+            return false;
+        };
+
+        unsigned.redacted_because.is_some()
+    }
+
     pub fn remove_transaction_id(&mut self) -> crate::Result<()> {
         if let Some(unsigned) = &self.unsigned {
             let mut unsigned: BTreeMap<String, Box<RawJsonValue>> =
@@ -426,7 +444,7 @@ pub(crate) fn gen_event_id_canonical_json(
         "${}",
         // Anything higher than version3 behaves the same
         ruma::signatures::reference_hash(&value, room_version_id)
-            .expect("ruma can calculate reference hashes")
+            .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))?
     )
     .try_into()
     .expect("ruma's reference hashes are valid event ids");
@@ -443,4 +461,8 @@ pub struct PduBuilder {
     pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
     pub state_key: Option<String>,
     pub redacts: Option<Arc<EventId>>,
+    /// For timestamped messaging, should only be used for appservices
+    ///
+    /// Will be set to current time if None
+    pub timestamp: Option<MilliSecondsSinceUnixEpoch>,
 }
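The new `is_redacted` helper only checks whether the event's raw `unsigned` object carries a `redacted_because` entry, without deserializing anything else. A standalone sketch of that check (the free function here is hypothetical; in the diff the method lives on `PduEvent`):

```rust
use serde::Deserialize;
use serde_json::value::RawValue;

#[derive(Deserialize)]
struct ExtractRedactedBecause {
    // IgnoredAny accepts any value, so only the key's presence matters.
    redacted_because: Option<serde::de::IgnoredAny>,
}

/// Returns true if the raw `unsigned` object contains `redacted_because`.
fn is_redacted(unsigned: Option<&RawValue>) -> bool {
    let Some(unsigned) = unsigned else {
        return false;
    };
    match serde_json::from_str::<ExtractRedactedBecause>(unsigned.get()) {
        Ok(u) => u.redacted_because.is_some(),
        Err(_) => false,
    }
}
```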
@@ -1,9 +1,12 @@
 use crate::Result;
-use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
+use ruma::{OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId};

 pub trait Data: Send + Sync {
     /// Creates or updates the alias to the given room id.
-    fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>;
+    fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()>;
+
+    /// Finds the user who assigned the given alias to a room
+    fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>>;

     /// Forgets about an alias. Returns an error if the alias did not exist.
     fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>;
@@ -1,9 +1,17 @@
 mod data;

 pub use data::Data;
+use tracing::error;

-use crate::Result;
-use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
+use crate::{services, Error, Result};
+use ruma::{
+    api::client::error::ErrorKind,
+    events::{
+        room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
+        StateEventType,
+    },
+    OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId,
+};

 pub struct Service {
     pub db: &'static dyn Data,
@@ -11,13 +19,71 @@ pub struct Service {

 impl Service {
     #[tracing::instrument(skip(self))]
-    pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
-        self.db.set_alias(alias, room_id)
+    pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> {
+        if alias == services().globals.admin_alias() && user_id != services().globals.server_user()
+        {
+            Err(Error::BadRequest(
+                ErrorKind::forbidden(),
+                "Only the server user can set this alias",
+            ))
+        } else {
+            self.db.set_alias(alias, room_id, user_id)
+        }
     }

     #[tracing::instrument(skip(self))]
-    pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
-        self.db.remove_alias(alias)
+    fn user_can_remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<bool> {
+        let Some(room_id) = self.resolve_local_alias(alias)? else {
+            return Err(Error::BadRequest(ErrorKind::NotFound, "Alias not found."));
+        };
+
+        // The creator of an alias can remove it
+        if self
+            .db
+            .who_created_alias(alias)?
+            .map(|user| user == user_id)
+            .unwrap_or_default()
+            // Server admins can remove any local alias
+            || services().admin.user_is_admin(user_id)?
+            // Always allow the Conduit user to remove the alias, since there may not be an admin room
+            || services().globals.server_user() == user_id
+        {
+            Ok(true)
+            // Checking whether the user is able to change canonical aliases of the room
+        } else if let Some(event) = services().rooms.state_accessor.room_state_get(
+            &room_id,
+            &StateEventType::RoomPowerLevels,
+            "",
+        )? {
+            serde_json::from_str(event.content.get())
+                .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels"))
+                .map(|content: RoomPowerLevelsEventContent| {
+                    RoomPowerLevels::from(content)
+                        .user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)
+                })
+            // If there is no power levels event, only the room creator can change canonical aliases
+        } else if let Some(event) = services().rooms.state_accessor.room_state_get(
+            &room_id,
+            &StateEventType::RoomCreate,
+            "",
+        )? {
+            Ok(event.sender == user_id)
+        } else {
+            error!("Room {} has no m.room.create event (VERY BAD)!", room_id);
+            Err(Error::bad_database("Room has no m.room.create event"))
+        }
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<()> {
+        if self.user_can_remove_alias(alias, user_id)? {
+            self.db.remove_alias(alias)
+        } else {
+            Err(Error::BadRequest(
+                ErrorKind::forbidden(),
+                "User is not permitted to remove this alias.",
+            ))
+        }
     }

     #[tracing::instrument(skip(self))]
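Collapsed into booleans, the permission logic above applies its rules in a fixed order, falling back to the room creator only when no power-levels event exists. The helper below is a hypothetical summary, not code from the diff; every lookup is stubbed out as a parameter:

```rust
/// Order of the alias-removal checks, with all service lookups stubbed out.
fn user_can_remove_alias(
    user_created_alias: bool,
    user_is_server_admin: bool,
    user_is_server_user: bool,
    // Some(..) if an m.room.power_levels event exists and says whether the user
    // may send m.room.canonical_alias state; None if the event is missing.
    can_send_canonical_alias: Option<bool>,
    user_is_room_creator: bool,
) -> bool {
    user_created_alias
        || user_is_server_admin
        || user_is_server_user
        || can_send_canonical_alias.unwrap_or(user_is_room_creator)
}
```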
@@ -133,7 +133,10 @@
         match services().rooms.timeline.get_pdu(&event_id) {
             Ok(Some(pdu)) => {
                 if pdu.room_id != room_id {
-                    return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db"));
+                    return Err(Error::BadRequest(
+                        ErrorKind::forbidden(),
+                        "Evil event in db",
+                    ));
                 }
                 for auth_event in &pdu.auth_events {
                     let sauthevent = services()
@@ -9,6 +9,7 @@ use std::{
 };

 use futures_util::{stream::FuturesUnordered, Future, StreamExt};
+use globals::SigningKeys;
 use ruma::{
     api::{
         client::error::ErrorKind,
@@ -30,7 +31,6 @@ use ruma::{
         StateEventType, TimelineEventType,
     },
     int,
-    serde::Base64,
     state_res::{self, RoomVersion, StateMap},
     uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
     OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName,
@@ -78,7 +78,7 @@ impl Service {
         room_id: &'a RoomId,
         value: BTreeMap<String, CanonicalJsonValue>,
         is_timeline_event: bool,
-        pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
     ) -> Result<Option<Vec<u8>>> {
         // 0. Check the server is in the room
         if !services().rooms.metadata.exists(room_id)? {
@@ -90,7 +90,7 @@ impl Service {

         if services().rooms.metadata.is_disabled(room_id)? {
             return Err(Error::BadRequest(
-                ErrorKind::Forbidden,
+                ErrorKind::forbidden(),
                 "Federation of this room is currently disabled on this server.",
             ));
         }
@@ -162,7 +162,7 @@ impl Service {
         // Check for disabled again because it might have changed
         if services().rooms.metadata.is_disabled(room_id)? {
             return Err(Error::BadRequest(
-                ErrorKind::Forbidden,
+                ErrorKind::forbidden(),
                 "Federation of this room is currently disabled on this server.",
             ));
         }
@@ -304,19 +304,12 @@ impl Service {
         room_id: &'a RoomId,
         mut value: BTreeMap<String, CanonicalJsonValue>,
         auth_events_known: bool,
-        pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
     ) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> {
         Box::pin(async move {
             // 1.1. Remove unsigned field
             value.remove("unsigned");

-            // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
-
-            // We go through all the signatures we see on the value and fetch the corresponding signing
-            // keys
-            self.fetch_required_signing_keys(&value, pub_key_map)
-                .await?;
-
             // 2. Check signatures, otherwise drop
             // 3. check content hash, redact if doesn't match
             let create_event_content: RoomCreateEventContent =
@@ -329,41 +322,80 @@ impl Service {
             let room_version =
                 RoomVersion::new(room_version_id).expect("room version is supported");

-            let guard = pub_key_map.read().await;
+            // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
-            let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) {
-                Err(e) => {
-                    // Drop
-                    warn!("Dropping bad event {}: {}", event_id, e,);
-                    return Err(Error::BadRequest(
-                        ErrorKind::InvalidParam,
-                        "Signature verification failed",
-                    ));
-                }
-                Ok(ruma::signatures::Verified::Signatures) => {
-                    // Redact
-                    warn!("Calculated hash does not match: {}", event_id);
-                    let obj = match ruma::canonical_json::redact(value, room_version_id, None) {
-                        Ok(obj) => obj,
-                        Err(_) => {
-                            return Err(Error::BadRequest(
-                                ErrorKind::InvalidParam,
-                                "Redaction failed",
-                            ))
-                        }
-                    };

                     // Skip the PDU if it is redacted and we already have it as an outlier event
|
// We go through all the signatures we see on the value and fetch the corresponding signing
|
||||||
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
|
// keys
|
||||||
|
self.fetch_required_signing_keys(&value, pub_key_map)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
|
||||||
|
error!("Invalid PDU, no origin_server_ts field");
|
||||||
|
Error::BadRequest(
|
||||||
|
ErrorKind::MissingParam,
|
||||||
|
"Invalid PDU, no origin_server_ts field",
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
|
||||||
|
let ts = origin_server_ts.as_integer().ok_or_else(|| {
|
||||||
|
Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"origin_server_ts must be an integer",
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
|
||||||
|
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
|
||||||
|
})?)
|
||||||
|
};
|
||||||
|
|
||||||
|
let guard = pub_key_map.read().await;
|
||||||
|
|
||||||
|
let pkey_map = (*guard).clone();
|
||||||
|
|
||||||
|
// Removing all the expired keys, unless the room version allows stale keys
|
||||||
|
let filtered_keys = services().globals.filter_keys_server_map(
|
||||||
|
pkey_map,
|
||||||
|
origin_server_ts,
|
||||||
|
room_version_id,
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut val =
|
||||||
|
match ruma::signatures::verify_event(&filtered_keys, &value, room_version_id) {
|
||||||
|
Err(e) => {
|
||||||
|
// Drop
|
||||||
|
warn!("Dropping bad event {}: {}", event_id, e,);
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Event was redacted and we already knew about it",
|
"Signature verification failed",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
Ok(ruma::signatures::Verified::Signatures) => {
|
||||||
|
// Redact
|
||||||
|
warn!("Calculated hash does not match: {}", event_id);
|
||||||
|
let obj = match ruma::canonical_json::redact(value, room_version_id, None) {
|
||||||
|
Ok(obj) => obj,
|
||||||
|
Err(_) => {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Redaction failed",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
obj
|
// Skip the PDU if it is redacted and we already have it as an outlier event
|
||||||
}
|
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
|
||||||
Ok(ruma::signatures::Verified::All) => value,
|
return Err(Error::BadRequest(
|
||||||
};
|
ErrorKind::InvalidParam,
|
||||||
|
"Event was redacted and we already knew about it",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
obj
|
||||||
|
}
|
||||||
|
Ok(ruma::signatures::Verified::All) => value,
|
||||||
|
};
|
||||||
|
|
||||||
drop(guard);
|
drop(guard);
|
||||||
|
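Illustrative note (not part of the changeset): the hunk above makes signature verification run against a key map that has first been filtered by the event's origin_server_ts, so that keys which had already expired when the event was created cannot validate it. Below is a minimal, self-contained sketch of that filtering idea; the types and the allow_stale flag are simplified stand-ins, not Conduit's actual SigningKeys or filter_keys_server_map API.

use std::collections::BTreeMap;

/// Simplified stand-in for a server's published signing keys.
#[derive(Clone)]
struct ServerKeys {
    verify_keys: BTreeMap<String, String>, // key id -> base64 public key
    valid_until_ts: u64,                   // milliseconds since the unix epoch
}

/// Keep only servers whose keys were still valid when the event claims to
/// have been created; verification then runs against this filtered map.
fn filter_expired(
    keys: BTreeMap<String, ServerKeys>,
    event_origin_ts: u64,
    allow_stale: bool,
) -> BTreeMap<String, ServerKeys> {
    keys.into_iter()
        .filter(|(_server, k)| allow_stale || k.valid_until_ts >= event_origin_ts)
        .collect()
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert(
        "other.server".to_owned(),
        ServerKeys {
            verify_keys: BTreeMap::from([("ed25519:abc".to_owned(), "AAAA".to_owned())]),
            valid_until_ts: 1_700_000_000_000,
        },
    );
    // An event claiming an origin_server_ts later than the key's validity window
    // loses its only candidate key and will fail verification.
    let filtered = filter_expired(map, 1_800_000_000_000, false);
    assert!(filtered.is_empty());
}

Filtering before verification (rather than rejecting afterwards) keeps the verification call itself unchanged while still enforcing key expiry per event timestamp.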
@@ -487,7 +519,7 @@ impl Service {
         create_event: &PduEvent,
         origin: &ServerName,
         room_id: &RoomId,
-        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
     ) -> Result<Option<Vec<u8>>> {
         // Skip the PDU if we already have it as a timeline event
         if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) {

@@ -1097,7 +1129,7 @@ impl Service {
         create_event: &'a PduEvent,
         room_id: &'a RoomId,
         room_version_id: &'a RoomVersionId,
-        pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
     ) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
     {
         Box::pin(async move {

@@ -1280,7 +1312,7 @@ impl Service {
         create_event: &PduEvent,
         room_id: &RoomId,
         room_version_id: &RoomVersionId,
-        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
         initial_set: Vec<Arc<EventId>>,
     ) -> Result<(
         Vec<Arc<EventId>>,

@@ -1378,7 +1410,7 @@ impl Service {
     pub(crate) async fn fetch_required_signing_keys(
         &self,
         event: &BTreeMap<String, CanonicalJsonValue>,
-        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
     ) -> Result<()> {
         let signatures = event
             .get("signatures")

@@ -1407,6 +1439,7 @@ impl Service {
                 )
             })?,
             signature_ids,
+            true,
         )
         .await;

@@ -1434,7 +1467,7 @@ impl Service {
         pdu: &RawJsonValue,
         servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
         room_version: &RoomVersionId,
-        pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, Base64>>>,
+        pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, SigningKeys>>,
     ) -> Result<()> {
         let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
             error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);

@@ -1444,7 +1477,7 @@ impl Service {
         let event_id = format!(
             "${}",
             ruma::signatures::reference_hash(&value, room_version)
-                .expect("ruma can calculate reference hashes")
+                .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))?
         );
         let event_id = <&EventId>::try_from(event_id.as_str())
             .expect("ruma's reference hashes are valid event ids");

@@ -1485,8 +1518,18 @@ impl Service {

             let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();

-            let contains_all_ids = |keys: &BTreeMap<String, Base64>| {
-                signature_ids.iter().all(|id| keys.contains_key(id))
+            let contains_all_ids = |keys: &SigningKeys| {
+                signature_ids.iter().all(|id| {
+                    keys.verify_keys
+                        .keys()
+                        .map(ToString::to_string)
+                        .any(|key_id| id == &key_id)
+                        || keys
+                            .old_verify_keys
+                            .keys()
+                            .map(ToString::to_string)
+                            .any(|key_id| id == &key_id)
+                })
             };

             let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| {

@@ -1499,19 +1542,14 @@ impl Service {

             trace!("Loading signing keys for {}", origin);

-            let result: BTreeMap<_, _> = services()
-                .globals
-                .signing_keys_for(origin)?
-                .into_iter()
-                .map(|(k, v)| (k.to_string(), v.key))
-                .collect();
-
-            if !contains_all_ids(&result) {
-                trace!("Signing key not loaded for {}", origin);
-                servers.insert(origin.to_owned(), BTreeMap::new());
-            }
-
-            pub_key_map.insert(origin.to_string(), result);
+            if let Some(result) = services().globals.signing_keys_for(origin)? {
+                if !contains_all_ids(&result) {
+                    trace!("Signing key not loaded for {}", origin);
+                    servers.insert(origin.to_owned(), BTreeMap::new());
+                }
+
+                pub_key_map.insert(origin.to_string(), result);
+            }
         }

         Ok(())

@@ -1521,7 +1559,7 @@ impl Service {
         &self,
         event: &create_join_event::v2::Response,
         room_version: &RoomVersionId,
-        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
     ) -> Result<()> {
         let mut servers: BTreeMap<
             OwnedServerName,

@@ -1584,10 +1622,7 @@ impl Service {

                 let result = services()
                     .globals
-                    .add_signing_key(&k.server_name, k.clone())?
-                    .into_iter()
-                    .map(|(k, v)| (k.to_string(), v.key))
-                    .collect::<BTreeMap<_, _>>();
+                    .add_signing_key_from_trusted_server(&k.server_name, k.clone())?;

                 pkm.insert(k.server_name.to_string(), result);
             }

@@ -1618,12 +1653,9 @@ impl Service {
         if let (Ok(get_keys_response), origin) = result {
             info!("Result is from {origin}");
             if let Ok(key) = get_keys_response.server_key.deserialize() {
-                let result: BTreeMap<_, _> = services()
+                let result = services()
                     .globals
-                    .add_signing_key(&origin, key)?
-                    .into_iter()
-                    .map(|(k, v)| (k.to_string(), v.key))
-                    .collect();
+                    .add_signing_key_from_origin(&origin, key)?;

                 pub_key_map.write().await.insert(origin.to_string(), result);
             }
         }

@@ -1655,11 +1687,6 @@ impl Service {
             }
         };

-        if acl_event_content.allow.is_empty() {
-            // Ignore broken acl events
-            return Ok(());
-        }
-
         if acl_event_content.is_allowed(server_name) {
             Ok(())
         } else {

@@ -1668,7 +1695,7 @@ impl Service {
                 server_name, room_id
             );
             Err(Error::BadRequest(
-                ErrorKind::Forbidden,
+                ErrorKind::forbidden(),
                 "Server was denied by room ACL",
             ))
         }

@@ -1681,9 +1708,23 @@ impl Service {
         &self,
         origin: &ServerName,
         signature_ids: Vec<String>,
-    ) -> Result<BTreeMap<String, Base64>> {
-        let contains_all_ids =
-            |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
+        // Whether to ask for keys from trusted servers. Should be false when getting
+        // keys for validating requests, as per MSC4029
+        query_via_trusted_servers: bool,
+    ) -> Result<SigningKeys> {
+        let contains_all_ids = |keys: &SigningKeys| {
+            signature_ids.iter().all(|id| {
+                keys.verify_keys
+                    .keys()
+                    .map(ToString::to_string)
+                    .any(|key_id| id == &key_id)
+                    || keys
+                        .old_verify_keys
+                        .keys()
+                        .map(ToString::to_string)
+                        .any(|key_id| id == &key_id)
+            })
+        };

         let permit = services()
             .globals

@@ -1744,94 +1785,172 @@ impl Service {

         trace!("Loading signing keys for {}", origin);

-        let mut result: BTreeMap<_, _> = services()
-            .globals
-            .signing_keys_for(origin)?
-            .into_iter()
-            .map(|(k, v)| (k.to_string(), v.key))
-            .collect();
+        let result = services().globals.signing_keys_for(origin)?;

-        if contains_all_ids(&result) {
-            return Ok(result);
+        let mut expires_soon_or_has_expired = false;
+
+        if let Some(result) = result.clone() {
+            let ts_threshold = MilliSecondsSinceUnixEpoch::from_system_time(
+                SystemTime::now() + Duration::from_secs(30 * 60),
+            )
+            .expect("Should be valid until year 500,000,000");
+
+            debug!(
+                "The treshhold is {:?}, found time is {:?} for server {}",
+                ts_threshold, result.valid_until_ts, origin
+            );
+
+            if contains_all_ids(&result) {
+                // We want to ensure that the keys remain valid by the time the other functions that handle signatures reach them
+                if result.valid_until_ts > ts_threshold {
+                    debug!(
+                        "Keys for {} are deemed as valid, as they expire at {:?}",
+                        &origin, &result.valid_until_ts
+                    );
+                    return Ok(result);
+                }
+
+                expires_soon_or_has_expired = true;
+            }
         }

+        let mut keys = result.unwrap_or_else(|| SigningKeys {
+            verify_keys: BTreeMap::new(),
+            old_verify_keys: BTreeMap::new(),
+            valid_until_ts: MilliSecondsSinceUnixEpoch::now(),
+        });
+
+        // We want to set this to the max, and then lower it whenever we see older keys
+        keys.valid_until_ts = MilliSecondsSinceUnixEpoch::from_system_time(
+            SystemTime::now() + Duration::from_secs(7 * 86400),
+        )
+        .expect("Should be valid until year 500,000,000");
+
         debug!("Fetching signing keys for {} over federation", origin);

-        if let Some(server_key) = services()
+        if let Some(mut server_key) = services()
             .sending
             .send_federation_request(origin, get_server_keys::v2::Request::new())
             .await
             .ok()
             .and_then(|resp| resp.server_key.deserialize().ok())
         {
+            // Keys should only be valid for a maximum of seven days
+            server_key.valid_until_ts = server_key.valid_until_ts.min(
+                MilliSecondsSinceUnixEpoch::from_system_time(
+                    SystemTime::now() + Duration::from_secs(7 * 86400),
+                )
+                .expect("Should be valid until year 500,000,000"),
+            );
+
             services()
                 .globals
-                .add_signing_key(origin, server_key.clone())?;
+                .add_signing_key_from_origin(origin, server_key.clone())?;

-            result.extend(
+            if keys.valid_until_ts > server_key.valid_until_ts {
+                keys.valid_until_ts = server_key.valid_until_ts;
+            }
+
+            keys.verify_keys.extend(
                 server_key
                     .verify_keys
                     .into_iter()
-                    .map(|(k, v)| (k.to_string(), v.key)),
+                    .map(|(id, key)| (id.to_string(), key)),
             );
-            result.extend(
+            keys.old_verify_keys.extend(
                 server_key
                     .old_verify_keys
                     .into_iter()
-                    .map(|(k, v)| (k.to_string(), v.key)),
+                    .map(|(id, key)| (id.to_string(), key)),
             );

-            if contains_all_ids(&result) {
-                return Ok(result);
+            if contains_all_ids(&keys) {
+                return Ok(keys);
             }
         }

-        for server in services().globals.trusted_servers() {
-            debug!("Asking {} for {}'s signing key", server, origin);
-            if let Some(server_keys) = services()
-                .sending
-                .send_federation_request(
-                    server,
-                    get_remote_server_keys::v2::Request::new(
-                        origin.to_owned(),
-                        MilliSecondsSinceUnixEpoch::from_system_time(
-                            SystemTime::now()
-                                .checked_add(Duration::from_secs(3600))
-                                .expect("SystemTime to large"),
-                        )
-                        .expect("time is valid"),
-                    ),
-                )
-                .await
-                .ok()
-                .map(|resp| {
-                    resp.server_keys
-                        .into_iter()
-                        .filter_map(|e| e.deserialize().ok())
-                        .collect::<Vec<_>>()
-                })
-            {
-                trace!("Got signing keys: {:?}", server_keys);
-                for k in server_keys {
-                    services().globals.add_signing_key(origin, k.clone())?;
-                    result.extend(
-                        k.verify_keys
-                            .into_iter()
-                            .map(|(k, v)| (k.to_string(), v.key)),
-                    );
-                    result.extend(
-                        k.old_verify_keys
-                            .into_iter()
-                            .map(|(k, v)| (k.to_string(), v.key)),
-                    );
-                }
-
-                if contains_all_ids(&result) {
-                    return Ok(result);
+        if query_via_trusted_servers {
+            for server in services().globals.trusted_servers() {
+                debug!("Asking {} for {}'s signing key", server, origin);
+                if let Some(server_keys) = services()
+                    .sending
+                    .send_federation_request(
+                        server,
+                        get_remote_server_keys::v2::Request::new(
+                            origin.to_owned(),
+                            MilliSecondsSinceUnixEpoch::from_system_time(
+                                SystemTime::now()
+                                    .checked_add(Duration::from_secs(3600))
+                                    .expect("SystemTime to large"),
+                            )
+                            .expect("time is valid"),
+                        ),
+                    )
+                    .await
+                    .ok()
+                    .map(|resp| {
+                        resp.server_keys
+                            .into_iter()
+                            .filter_map(|e| e.deserialize().ok())
+                            .collect::<Vec<_>>()
+                    })
+                {
+                    trace!("Got signing keys: {:?}", server_keys);
+                    for mut k in server_keys {
+                        if k.valid_until_ts
+                        // Half an hour should give plenty of time for the server to respond with keys that are still
+                        // valid, given we requested keys which are valid at least an hour from now
+                            < MilliSecondsSinceUnixEpoch::from_system_time(
+                                SystemTime::now() + Duration::from_secs(30 * 60),
+                            )
+                            .expect("Should be valid until year 500,000,000")
+                        {
+                            // Keys should only be valid for a maximum of seven days
+                            k.valid_until_ts = k.valid_until_ts.min(
+                                MilliSecondsSinceUnixEpoch::from_system_time(
+                                    SystemTime::now() + Duration::from_secs(7 * 86400),
+                                )
+                                .expect("Should be valid until year 500,000,000"),
+                            );
+
+                            if keys.valid_until_ts > k.valid_until_ts {
+                                keys.valid_until_ts = k.valid_until_ts;
+                            }
+
+                            services()
+                                .globals
+                                .add_signing_key_from_trusted_server(origin, k.clone())?;
+                            keys.verify_keys.extend(
+                                k.verify_keys
+                                    .into_iter()
+                                    .map(|(id, key)| (id.to_string(), key)),
+                            );
+                            keys.old_verify_keys.extend(
+                                k.old_verify_keys
+                                    .into_iter()
+                                    .map(|(id, key)| (id.to_string(), key)),
+                            );
+                        } else {
+                            warn!(
+                                "Server {} gave us keys older than we requested, valid until: {:?}",
+                                origin, k.valid_until_ts
+                            );
+                        }
+
+                        if contains_all_ids(&keys) {
+                            return Ok(keys);
+                        }
+                    }
                 }
             }
         }

+        // We should return these keys if fresher keys were not found
+        if expires_soon_or_has_expired {
+            info!("Returning stale keys for {}", origin);
+            return Ok(keys);
+        }
+
         drop(permit);

         back_off(signature_ids).await;
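Illustrative note (not part of the changeset): the fetch_signing_keys hunk above caps every fetched key set at seven days of validity and treats key sets that expire within half an hour as "expiring soon", falling back to stale keys only when nothing fresher was found. A compact sketch of those two expiry rules, using only std types; the helper names are invented for illustration and are not Conduit's API.

use std::time::{Duration, SystemTime, UNIX_EPOCH};

const SEVEN_DAYS: Duration = Duration::from_secs(7 * 86400);
const SOON: Duration = Duration::from_secs(30 * 60);

fn now_ms() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is after the unix epoch")
        .as_millis() as u64
}

/// Clamp a server-provided expiry to at most seven days from now.
fn clamp_valid_until(server_valid_until_ms: u64) -> u64 {
    let cap = now_ms() + SEVEN_DAYS.as_millis() as u64;
    server_valid_until_ms.min(cap)
}

/// A key set only counts as "fresh" if it stays valid for at least another
/// half hour; otherwise callers refetch and may fall back to the stale copy.
fn expires_soon(valid_until_ms: u64) -> bool {
    valid_until_ms <= now_ms() + SOON.as_millis() as u64
}

fn main() {
    let reported = now_ms() + 30 * 86_400_000; // server claims 30 days of validity
    let clamped = clamp_valid_until(reported);
    assert!(clamped < reported);
    assert!(!expires_soon(clamped));
}

The merged map's valid_until_ts is lowered to the minimum of all contributing key sets, so one short-lived key forces an earlier refresh of the whole set.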
@@ -3,9 +3,9 @@ use std::sync::Arc;

 pub use data::Data;
 use ruma::{
-    api::client::relations::get_relating_events,
+    api::{client::relations::get_relating_events, Direction},
     events::{relation::RelationType, TimelineEventType},
-    EventId, RoomId, UserId,
+    EventId, RoomId, UInt, UserId,
 };
 use serde::Deserialize;

@@ -48,37 +48,57 @@ impl Service {
         target: &EventId,
         filter_event_type: Option<TimelineEventType>,
         filter_rel_type: Option<RelationType>,
-        from: PduCount,
-        to: Option<PduCount>,
-        limit: usize,
+        from: Option<String>,
+        to: Option<String>,
+        limit: Option<UInt>,
+        recurse: bool,
+        dir: &Direction,
     ) -> Result<get_relating_events::v1::Response> {
+        let from = match from {
+            Some(from) => PduCount::try_from_string(&from)?,
+            None => match dir {
+                Direction::Forward => PduCount::min(),
+                Direction::Backward => PduCount::max(),
+            },
+        };
+
+        let to = to.as_ref().and_then(|t| PduCount::try_from_string(t).ok());
+
+        // Use limit or else 10, with maximum 100
+        let limit = limit
+            .and_then(|u| u32::try_from(u).ok())
+            .map_or(10_usize, |u| u as usize)
+            .min(100);
+
         let next_token;

-        //TODO: Fix ruma: match body.dir {
-        match ruma::api::Direction::Backward {
-            ruma::api::Direction::Forward => {
-                let events_after: Vec<_> = services()
-                    .rooms
-                    .pdu_metadata
-                    .relations_until(sender_user, room_id, target, from)? // TODO: should be relations_after
-                    .filter(|r| {
-                        r.as_ref().map_or(true, |(_, pdu)| {
-                            filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
-                                && if let Ok(content) =
-                                    serde_json::from_str::<ExtractRelatesToEventId>(
-                                        pdu.content.get(),
-                                    )
-                                {
-                                    filter_rel_type
-                                        .as_ref()
-                                        .map_or(true, |r| &content.relates_to.rel_type == r)
-                                } else {
-                                    false
-                                }
-                        })
+        // Spec (v1.10) recommends depth of at least 3
+        let depth: u8 = if recurse { 3 } else { 1 };
+
+        match dir {
+            Direction::Forward => {
+                let relations_until = &services().rooms.pdu_metadata.relations_until(
+                    sender_user,
+                    room_id,
+                    target,
+                    from,
+                    depth,
+                )?;
+                let events_after: Vec<_> = relations_until // TODO: should be relations_after
+                    .iter()
+                    .filter(|(_, pdu)| {
+                        filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
+                            && if let Ok(content) =
+                                serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
+                            {
+                                filter_rel_type
+                                    .as_ref()
+                                    .map_or(true, |r| &content.relates_to.rel_type == r)
+                            } else {
+                                false
+                            }
                     })
                     .take(limit)
-                    .filter_map(|r| r.ok()) // Filter out buggy events
                     .filter(|(_, pdu)| {
                         services()
                             .rooms

@@ -86,7 +106,7 @@ impl Service {
                             .user_can_see_event(sender_user, room_id, &pdu.event_id)
                             .unwrap_or(false)
                     })
-                    .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
+                    .take_while(|(k, _)| Some(k) != to.as_ref()) // Stop at `to`
                     .collect();

                 next_token = events_after.last().map(|(count, _)| count).copied();

@@ -101,31 +121,32 @@ impl Service {
                     chunk: events_after,
                     next_batch: next_token.map(|t| t.stringify()),
                     prev_batch: Some(from.stringify()),
+                    recursion_depth: if recurse { Some(depth.into()) } else { None },
                 })
             }
-            ruma::api::Direction::Backward => {
-                let events_before: Vec<_> = services()
-                    .rooms
-                    .pdu_metadata
-                    .relations_until(sender_user, room_id, target, from)?
-                    .filter(|r| {
-                        r.as_ref().map_or(true, |(_, pdu)| {
-                            filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
-                                && if let Ok(content) =
-                                    serde_json::from_str::<ExtractRelatesToEventId>(
-                                        pdu.content.get(),
-                                    )
-                                {
-                                    filter_rel_type
-                                        .as_ref()
-                                        .map_or(true, |r| &content.relates_to.rel_type == r)
-                                } else {
-                                    false
-                                }
-                        })
+            Direction::Backward => {
+                let relations_until = &services().rooms.pdu_metadata.relations_until(
+                    sender_user,
+                    room_id,
+                    target,
+                    from,
+                    depth,
+                )?;
+                let events_before: Vec<_> = relations_until
+                    .iter()
+                    .filter(|(_, pdu)| {
+                        filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
+                            && if let Ok(content) =
+                                serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
+                            {
+                                filter_rel_type
+                                    .as_ref()
+                                    .map_or(true, |r| &content.relates_to.rel_type == r)
+                            } else {
+                                false
+                            }
                     })
                     .take(limit)
-                    .filter_map(|r| r.ok()) // Filter out buggy events
                     .filter(|(_, pdu)| {
                         services()
                             .rooms

@@ -133,7 +154,7 @@ impl Service {
                             .user_can_see_event(sender_user, room_id, &pdu.event_id)
                             .unwrap_or(false)
                     })
-                    .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
+                    .take_while(|&(k, _)| Some(k) != to.as_ref()) // Stop at `to`
                     .collect();

                 next_token = events_before.last().map(|(count, _)| count).copied();

@@ -147,6 +168,7 @@ impl Service {
                     chunk: events_before,
                     next_batch: next_token.map(|t| t.stringify()),
                     prev_batch: Some(from.stringify()),
+                    recursion_depth: if recurse { Some(depth.into()) } else { None },
                 })
             }
         }

@@ -158,14 +180,44 @@ impl Service {
         room_id: &'a RoomId,
         target: &'a EventId,
         until: PduCount,
-    ) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
+        max_depth: u8,
+    ) -> Result<Vec<(PduCount, PduEvent)>> {
         let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?;
         let target = match services().rooms.timeline.get_pdu_count(target)? {
             Some(PduCount::Normal(c)) => c,
             // TODO: Support backfilled relations
             _ => 0, // This will result in an empty iterator
         };
-        self.db.relations_until(user_id, room_id, target, until)
+
+        self.db
+            .relations_until(user_id, room_id, target, until)
+            .map(|mut relations| {
+                let mut pdus: Vec<_> = (*relations).into_iter().filter_map(Result::ok).collect();
+                let mut stack: Vec<_> =
+                    pdus.clone().iter().map(|pdu| (pdu.to_owned(), 1)).collect();
+
+                while let Some(stack_pdu) = stack.pop() {
+                    let target = match stack_pdu.0 .0 {
+                        PduCount::Normal(c) => c,
+                        // TODO: Support backfilled relations
+                        PduCount::Backfilled(_) => 0, // This will result in an empty iterator
+                    };
+
+                    if let Ok(relations) = self.db.relations_until(user_id, room_id, target, until)
+                    {
+                        for relation in relations.flatten() {
+                            if stack_pdu.1 < max_depth {
+                                stack.push((relation.clone(), stack_pdu.1 + 1));
+                            }
+
+                            pdus.push(relation);
+                        }
+                    }
+                }
+
+                pdus.sort_by(|a, b| a.0.cmp(&b.0));
+                pdus
+            })
     }

     #[tracing::instrument(skip(self, room_id, event_ids))]
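Illustrative note (not part of the changeset): the relations_until hunk above walks relations transitively with an explicit stack and a per-item depth counter, instead of a single flat lookup. Below is a simplified, self-contained variant of the same idea over a plain id graph (the depth check here only gates traversal, which differs slightly from the patch, and all names are invented for illustration).

use std::collections::BTreeMap;

/// Collect an event's relations up to `max_depth` levels deep, using an
/// explicit stack instead of recursion, then sort the result.
fn collect_relations(
    graph: &BTreeMap<u64, Vec<u64>>, // event id -> ids of events relating to it
    root: u64,
    max_depth: u8,
) -> Vec<u64> {
    // Direct relations form both the initial result set and the work stack.
    let mut found: Vec<u64> = graph.get(&root).cloned().unwrap_or_default();
    let mut stack: Vec<(u64, u8)> = found.iter().map(|&id| (id, 1)).collect();

    while let Some((id, depth)) = stack.pop() {
        for &related in graph.get(&id).into_iter().flatten() {
            if depth < max_depth {
                stack.push((related, depth + 1));
                found.push(related);
            }
        }
    }

    found.sort_unstable();
    found
}

fn main() {
    // 1 <- 2 <- 3 <- 4 (each later event relates to the previous one)
    let graph = BTreeMap::from([(1, vec![2]), (2, vec![3]), (3, vec![4])]);
    // Depth 1 returns only direct relations; depth 3 returns the whole thread.
    assert_eq!(collect_relations(&graph, 1, 1), vec![2]);
    assert_eq!(collect_relations(&graph, 1, 3), vec![2, 3, 4]);
}

An explicit stack keeps the traversal iterative and bounded by max_depth, which is why the endpoint can advertise a fixed recursion_depth in its response.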
@@ -4,6 +4,8 @@ use ruma::RoomId;
 pub trait Data: Send + Sync {
     fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;

+    fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
+
     #[allow(clippy::type_complexity)]
     fn search_pdus<'a>(
         &'a self,

@@ -15,6 +15,16 @@ impl Service {
         self.db.index_pdu(shortroomid, pdu_id, message_body)
     }

+    #[tracing::instrument(skip(self))]
+    pub fn deindex_pdu<'a>(
+        &self,
+        shortroomid: u64,
+        pdu_id: &[u8],
+        message_body: &str,
+    ) -> Result<()> {
+        self.db.deindex_pdu(shortroomid, pdu_id, message_body)
+    }
+
     #[tracing::instrument(skip(self))]
     pub fn search_pdus<'a>(
         &'a self,

@@ -408,7 +408,7 @@ impl Service {
             debug!("User is not allowed to see room {room_id}");
             // This error will be caught later
             return Err(Error::BadRequest(
-                ErrorKind::Forbidden,
+                ErrorKind::forbidden(),
                 "User is not allowed to see the room",
             ));
         }

@@ -321,6 +321,7 @@ impl Service {
             unsigned: None,
             state_key: Some(target_user.into()),
             redacts: None,
+            timestamp: None,
         };

         Ok(services()

@@ -21,10 +21,9 @@ use ruma::{
         GlobalAccountDataEventType, StateEventType, TimelineEventType,
     },
     push::{Action, Ruleset, Tweak},
-    serde::Base64,
     state_res::{self, Event, RoomVersion},
-    uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
-    OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
+    uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
+    OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
 };
 use serde::Deserialize;
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};

@@ -33,7 +32,10 @@ use tracing::{error, info, warn};

 use crate::{
     api::server_server,
-    service::pdu::{EventHash, PduBuilder},
+    service::{
+        globals::SigningKeys,
+        pdu::{EventHash, PduBuilder},
+    },
     services, utils, Error, PduEvent, Result,
 };

@@ -399,7 +401,7 @@ impl Service {
                         &pdu.room_id,
                         false,
                     )? {
                        self.redact_pdu(redact_id, pdu)?;
-                        self.redact_pdu(redact_id, pdu)?;
+                        self.redact_pdu(redact_id, pdu, shortroomid)?;
                     }
                 }
             }

@@ -416,7 +418,7 @@ impl Service {
                         &pdu.room_id,
                         false,
                     )? {
-                        self.redact_pdu(redact_id, pdu)?;
+                        self.redact_pdu(redact_id, pdu, shortroomid)?;
                     }
                 }
             }

@@ -483,20 +485,27 @@ impl Service {
                     .search
                     .index_pdu(shortroomid, &pdu_id, &body)?;

-                let server_user = format!("@conduit:{}", services().globals.server_name());
+                let server_user = services().globals.server_user();

                 let to_conduit = body.starts_with(&format!("{server_user}: "))
                     || body.starts_with(&format!("{server_user} "))
                     || body == format!("{server_user}:")
-                    || body == server_user;
+                    || body == server_user.as_str();

                 // This will evaluate to false if the emergency password is set up so that
                 // the administrator can execute commands as conduit
-                let from_conduit = pdu.sender == server_user
+                let from_conduit = pdu.sender == *server_user
                     && services().globals.emergency_password().is_none();

                 if let Some(admin_room) = services().admin.get_admin_room()? {
-                    if to_conduit && !from_conduit && admin_room == pdu.room_id {
+                    if to_conduit
+                        && !from_conduit
+                        && admin_room == pdu.room_id
+                        && services()
+                            .rooms
+                            .state_cache
+                            .is_joined(server_user, &admin_room)?
+                    {
                         services().admin.process_message(body);
                     }
                 }

@@ -656,6 +665,7 @@ impl Service {
             unsigned,
             state_key,
             redacts,
+            timestamp,
         } = pdu_builder;

         let prev_events: Vec<_> = services()

@@ -725,9 +735,9 @@ impl Service {
             event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
             room_id: room_id.to_owned(),
             sender: sender.to_owned(),
-            origin_server_ts: utils::millis_since_unix_epoch()
-                .try_into()
-                .expect("time is valid"),
+            origin_server_ts: timestamp
+                .map(|ts| ts.get())
+                .unwrap_or_else(|| MilliSecondsSinceUnixEpoch::now().get()),
             kind: event_type,
             content,
             state_key,

@@ -762,7 +772,7 @@ impl Service {

         if !auth_check {
             return Err(Error::BadRequest(
-                ErrorKind::Forbidden,
+                ErrorKind::forbidden(),
                 "Event is not authorized.",
             ));
         }

@@ -805,7 +815,7 @@ impl Service {
         pdu.event_id = EventId::parse_arc(format!(
             "${}",
             ruma::signatures::reference_hash(&pdu_json, &room_version_id)
-                .expect("ruma can calculate reference hashes")
+                .expect("Event format validated when event was hashed")
         ))
         .expect("ruma's reference hashes are valid event ids");

@@ -842,7 +852,7 @@ impl Service {
                 TimelineEventType::RoomEncryption => {
                     warn!("Encryption is not allowed in the admins room");
                     return Err(Error::BadRequest(
-                        ErrorKind::Forbidden,
+                        ErrorKind::forbidden(),
                         "Encryption is not allowed in the admins room.",
                     ));
                 }

@@ -857,7 +867,7 @@ impl Service {
                         .filter(|v| v.starts_with('@'))
                         .unwrap_or(sender.as_str());
                     let server_name = services().globals.server_name();
-                    let server_user = format!("@conduit:{}", server_name);
+                    let server_user = services().globals.server_user().as_str();
                     let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
                         .map_err(|_| Error::bad_database("Invalid content in pdu."))?;

@@ -865,7 +875,7 @@ impl Service {
                             if target == server_user {
                                 warn!("Conduit user cannot leave from admins room");
                                 return Err(Error::BadRequest(
-                                    ErrorKind::Forbidden,
+                                    ErrorKind::forbidden(),
                                     "Conduit user cannot leave from admins room.",
                                 ));
                             }

@@ -881,7 +891,7 @@ impl Service {
                             if count < 2 {
                                 warn!("Last admin cannot leave from admins room");
                                 return Err(Error::BadRequest(
-                                    ErrorKind::Forbidden,
+                                    ErrorKind::forbidden(),
                                     "Last admin cannot leave from admins room.",
                                 ));
                             }

@@ -891,7 +901,7 @@ impl Service {
                             if target == server_user {
                                 warn!("Conduit user cannot be banned in admins room");
                                 return Err(Error::BadRequest(
-                                    ErrorKind::Forbidden,
+                                    ErrorKind::forbidden(),
                                     "Conduit user cannot be banned in admins room.",
                                 ));
                             }

@@ -907,7 +917,7 @@ impl Service {
                             if count < 2 {
                                 warn!("Last admin cannot be banned in admins room");
                                 return Err(Error::BadRequest(
-                                    ErrorKind::Forbidden,
+                                    ErrorKind::forbidden(),
                                     "Last admin cannot be banned in admins room.",
                                 ));
                             }

@@ -939,7 +949,7 @@ impl Service {
                 false,
             )? {
                 return Err(Error::BadRequest(
-                    ErrorKind::Forbidden,
+                    ErrorKind::forbidden(),
                     "User cannot redact this event.",
                 ));
             }

@@ -960,7 +970,7 @@ impl Service {
                     false,
                 )? {
                     return Err(Error::BadRequest(
-                        ErrorKind::Forbidden,
+                        ErrorKind::forbidden(),
                         "User cannot redact this event.",
                     ));
                 }

@@ -1100,14 +1110,33 @@ impl Service {

     /// Replace a PDU with the redacted form.
     #[tracing::instrument(skip(self, reason))]
-    pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> {
+    pub fn redact_pdu(
+        &self,
+        event_id: &EventId,
+        reason: &PduEvent,
+        shortroomid: u64,
+    ) -> Result<()> {
         // TODO: Don't reserialize, keep original json
         if let Some(pdu_id) = self.get_pdu_id(event_id)? {
             let mut pdu = self
                 .get_pdu_from_id(&pdu_id)?
                 .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?;
+
+            #[derive(Deserialize)]
+            struct ExtractBody {
+                body: String,
+            }
+
+            if let Ok(content) = serde_json::from_str::<ExtractBody>(pdu.content.get()) {
+                services()
+                    .rooms
+                    .search
+                    .deindex_pdu(shortroomid, &pdu_id, &content.body)?;
+            }
+
             let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?;
             pdu.redact(room_version_id, reason)?;
+
             self.replace_pdu(
                 &pdu_id,
                 &utils::to_canonical_object(&pdu).expect("PDU is an object"),
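Illustrative note (not part of the changeset): the redact_pdu hunk above extracts the message body and calls deindex_pdu before the content is redacted, because after redaction the body (and with it the index terms) is gone. A toy full-text index makes the ordering constraint concrete; the types and names below are invented for illustration, not Conduit's search backend.

use std::collections::{BTreeMap, BTreeSet};

/// A toy full-text index: word -> set of event ids containing it.
#[derive(Default)]
struct SearchIndex {
    words: BTreeMap<String, BTreeSet<u64>>,
}

impl SearchIndex {
    fn index(&mut self, event_id: u64, body: &str) {
        for word in body.split_whitespace() {
            self.words
                .entry(word.to_lowercase())
                .or_default()
                .insert(event_id);
        }
    }

    /// Remove an event from the index; must be called with the *original*
    /// body, i.e. before the event content is redacted away.
    fn deindex(&mut self, event_id: u64, body: &str) {
        for word in body.split_whitespace() {
            if let Some(ids) = self.words.get_mut(&word.to_lowercase()) {
                ids.remove(&event_id);
            }
        }
    }
}

fn main() {
    let mut index = SearchIndex::default();
    let body = "hello world";
    index.index(7, body);

    // Redaction flow: grab the body first, deindex, then redact the event.
    index.deindex(7, body);
    assert!(index.words.values().all(|ids| !ids.contains(&7)));
}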
@@ -1188,7 +1217,7 @@ impl Service {
         &self,
         origin: &ServerName,
         pdu: Box<RawJsonValue>,
-        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
     ) -> Result<()> {
         let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?;

@@ -86,7 +86,7 @@ impl Service {

                     if !hash_matches {
                         uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody {
-                            kind: ErrorKind::Forbidden,
+                            kind: ErrorKind::forbidden(),
                             message: "Invalid username or password.".to_owned(),
                         });
                         return Ok((false, uiaainfo));

@@ -101,7 +101,7 @@ impl Service {
                         uiaainfo.completed.push(AuthType::RegistrationToken);
                     } else {
                         uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody {
-                            kind: ErrorKind::Forbidden,
+                            kind: ErrorKind::forbidden(),
                             message: "Invalid registration token.".to_owned(),
                         });
                         return Ok((false, uiaainfo));

@@ -211,4 +211,10 @@ pub trait Data: Send + Sync {
     fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result<String>;

     fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result<Option<FilterDefinition>>;
+
+    // Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
+    fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)>;
+
+    /// Find out which user an OpenID access token belongs to.
+    fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>>;
 }

@@ -9,7 +9,6 @@ pub use data::Data;
 use ruma::{
     api::client::{
         device::Device,
-        error::ErrorKind,
         filter::FilterDefinition,
         sync::sync_events::{
             self,

@@ -20,7 +19,7 @@ use ruma::{
     events::AnyToDeviceEvent,
     serde::Raw,
     DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri,
-    OwnedRoomId, OwnedUserId, RoomAliasId, UInt, UserId,
+    OwnedRoomId, OwnedUserId, UInt, UserId,
 };

 use crate::{services, Error, Result};

@@ -262,19 +261,14 @@ impl Service {

     /// Check if a user is an admin
     pub fn is_admin(&self, user_id: &UserId) -> Result<bool> {
-        let admin_room_alias_id =
-            RoomAliasId::parse(format!("#admins:{}", services().globals.server_name()))
-                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
-        let admin_room_id = services()
-            .rooms
-            .alias
-            .resolve_local_alias(&admin_room_alias_id)?
-            .unwrap();
-
-        services()
-            .rooms
-            .state_cache
-            .is_joined(user_id, &admin_room_id)
+        if let Some(admin_room_id) = services().admin.get_admin_room()? {
+            services()
+                .rooms
+                .state_cache
+                .is_joined(user_id, &admin_room_id)
+        } else {
+            Ok(false)
+        }
     }

     /// Create a new user account on this homeserver.

@@ -598,6 +592,16 @@ impl Service {
     ) -> Result<Option<FilterDefinition>> {
         self.db.get_filter(user_id, filter_id)
     }
+
+    // Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
+    pub fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> {
+        self.db.create_openid_token(user_id)
+    }
+
+    /// Find out which user an OpenID access token belongs to.
+    pub fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>> {
+        self.db.find_from_openid_token(token)
+    }
 }

 /// Ensure that a user only sees signatures from themselves and the target user
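Illustrative note (not part of the changeset): the new create_openid_token / find_from_openid_token pair above is a mint-and-lookup contract for OpenID tokens. A minimal in-memory sketch of that contract follows; it is not Conduit's persistent implementation, and a real server would use a cryptographically random token rather than the counter-based one used here for brevity.

use std::collections::HashMap;
use std::time::{SystemTime, UNIX_EPOCH};

/// Toy in-memory OpenID token store: token -> (user id, expiry in seconds).
#[derive(Default)]
struct OpenIdTokens {
    tokens: HashMap<String, (String, u64)>,
    counter: u64,
}

impl OpenIdTokens {
    /// Mint a token for `user_id` that is valid for one hour, returning the
    /// token and its remaining lifetime in seconds.
    fn create(&mut self, user_id: &str) -> (String, u64) {
        self.counter += 1;
        // NOT cryptographically random; a real implementation must use a CSPRNG.
        let token = format!("oidtok_{}_{}", self.counter, now_secs());
        let expires_at = now_secs() + 3600;
        self.tokens.insert(token.clone(), (user_id.to_owned(), expires_at));
        (token, 3600)
    }

    /// Find which user a token belongs to, ignoring expired tokens.
    fn find(&self, token: &str) -> Option<&str> {
        self.tokens
            .get(token)
            .filter(|(_, expires_at)| *expires_at > now_secs())
            .map(|(user, _)| user.as_str())
    }
}

fn now_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is after the unix epoch")
        .as_secs()
}

fn main() {
    let mut store = OpenIdTokens::default();
    let (token, _lifetime) = store.create("@alice:example.org");
    assert_eq!(store.find(&token), Some("@alice:example.org"));
    assert_eq!(store.find("unknown"), None);
}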
@@ -128,7 +128,7 @@ impl Error {
             kind.clone(),
             match kind {
                 WrongRoomKeysVersion { .. }
-                | Forbidden
+                | Forbidden { .. }
                 | GuestAccessForbidden
                 | ThreepidAuthFailed
                 | ThreepidDenied => StatusCode::FORBIDDEN,