Compare commits

203 commits: `unstable-m…` → `next`

Commits (SHA1):

3c93c81204 6767ca8bc8 f8d7ef04e6 892fb8846a bca8d1f70f 65fe6b0ab5
fea85b0894 a7405cddc0 3df21e8257 e4d6202840 c4810a3a08 73d0536cd3
a6797ca0a2 cdd03dfec0 2bab8869d0 cbd3b07ca7 27d6d94355 a3716a7d5a
a9c3867287 423b0928d5 44dd21f432 75a0f68349 8abab8c8a0 324e1beabf
00c9ef7b56 6455e918be ea3e7045b4 b8a1b4fee5 d95345377b 75322af8c7
11187b3fad 35ed731a46 1f313c6807 e70d27af98 ba8429cafe 7a4d0f6fe8
2f45a907f9 de0deda179 62f1da053f 602c56cae9 4b9520b5ad 9014e43ce1
ffc57f8997 fd19dda5cb dc0fa09a57 ba1138aaa3 6398136163 16af8b58ae
7a5b893013 c453d45598 144d548ef7 7b259272ce 48c1f3bdba dd19877528
ba2a5a6115 a36ccff06a 39b4932725 c45e52f45a 1dbb3433e0 efecb78888
f25a0b49eb b46000fadc 7b19618136 19154a9f70 ec8dfc283c be1b8b68a7
6c2eb4c786 3df791e030 9374b74e77 c732c7c97f 33c9da75ec 59d7674b2a
6bcc2f80b8 817f382c5f a888c7cb16 47aadcea1d 9b8ec21e6e e51f60e437
11990e7524 3ad7675bbf e2d91e26d6 20d9f3fd5d 965b6df83d 8876d54d78
d8badaf64b 08485ea5e4 eec9b9ed87 256dae983b 79c4bb17ca 57a24f234d
a4c973e57e f9953c31fc 6b669d2f4d 90c9794221 358164f49d 70b07dfabf
d97f5aa3b8 bed9072a69 2a2b9554c8 3d9d975a9f 96cc1f6abd 55259329a3
d796fe7cd3 3336d3f812 c9ee4a920e 5570f5f3da 38200163d6 9db1f5a13c
0074aca0ef 3ecf835b50 6e913bfec4 c1f695653b e6b6cc77d1 b69a74961b
622aa574b6 63ba157ef6 dfe2916357 b4a60c3f9a 0b217232ac 3b7c001350
10412d4be9 3c8879771c 4fac7ca169 a23bfdb3f0 60fe238893 cd4e4fe7ac
0a7ac058a0 c90e4816b7 2b5295aa29 df0ad2d07c bd5d9a7560 14ede9898d
0220e9e9d1 f62db723f7 a499c80d1b 5760d98192 2d3f64c1e5 aff97e4032
a56139549f 3b6928ebcf 61cd2892b8 acef61a3cc c6a7563126 779cebcd77
3b3466fd51 a854ce5cf6 414056442a 7c83372336 a854a46c24 429f80548f
a140bf8a6f 74db555336 08636ef236 3086271139 e40aed3a7d 0c0c9549b9
53d3f9ae89 7ace9b0dff 624654a88b 461236f3fb 1c4ae8d268 89c1c2109c
1bae8b35a9 00d6aeddb6 18f93ae8f3 6c9c1b5afe 27753b1d96 61cb186b5b
8c6ffb6bfc 2656f6f435 b48e1300f2 1474b94db6 e19e6a4bff 95f5c61843
4b288fd22f 2d8c551cd5 eb6801290b 7a7c09785e d22bf5182b 54e0e2a14c
475a68cbb9 92817213d5 ab8592526f 561a103140 b5e21f761b 9e6ce8326f
e88d137bd7 7f63948db9 2c73c3adbb 9497713a79 fb4217486f 1fb5bcf98f
34e0e710cb ac22b1bed1 8175bc1246 eb7ac91cd5 5a7bb1e8f1 520806d413
9646439a94 fac995036a 18bfd79ef2 a3b8eea9b4 d39d30008a
98 changed files with 5430 additions and 2953 deletions
@@ -6,6 +6,10 @@ stages:
variables:
  # Makes some things print in color
  TERM: ansi
  # Faster cache and artifact compression / decompression
  FF_USE_FASTZIP: true
  # Print progress reports for cache and artifact transfers
  TRANSFER_METER_FREQUENCY: 5s

# Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines

@@ -21,8 +25,8 @@ before_script:
  - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi

  # Add our own binary cache
  - if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.conduit.rs/conduit" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ddcaWZiWm0l0IXZlO8FERRdWvEufwmd0Negl1P+c0Ns=" >> /etc/nix/nix.conf; fi

  # Add alternate binary cache
  - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi

@@ -45,9 +49,12 @@ before_script:
  # Set CARGO_HOME to a cacheable path
  - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"

  # Cache attic client
  - if command -v nix > /dev/null; then ./bin/nix-build-and-cache --inputs-from . attic; fi

ci:
  stage: ci
  image: nixos/nix:2.20.4
  image: nixos/nix:2.22.0
  script:
    # Cache the inputs required for the devShell
    - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation

@@ -72,7 +79,7 @@ ci:

artifacts:
  stage: artifacts
  image: nixos/nix:2.20.4
  image: nixos/nix:2.22.0
  script:
    - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
    - cp result/bin/conduit x86_64-unknown-linux-musl

@@ -96,6 +103,11 @@ artifacts:
    - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
    - cp result/bin/conduit aarch64-unknown-linux-musl

    - mkdir -p target/aarch64-unknown-linux-musl/release
    - cp result/bin/conduit target/aarch64-unknown-linux-musl/release
    - direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl
    - mv target/aarch64-unknown-linux-musl/debian/*.deb aarch64-unknown-linux-musl.deb

    - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
    - cp result oci-image-arm64v8.tar.gz

@@ -107,6 +119,7 @@ artifacts:
    - x86_64-unknown-linux-musl
    - aarch64-unknown-linux-musl
    - x86_64-unknown-linux-musl.deb
    - aarch64-unknown-linux-musl.deb
    - oci-image-amd64.tar.gz
    - oci-image-arm64v8.tar.gz
    - public
Cargo.lock (generated, 1605 changed lines): file diff suppressed because it is too large.
Cargo.toml (189 changed lines)

@@ -1,27 +1,25 @@
# Keep alphabetically sorted
[workspace.lints.rust]
explicit_outlives_requirements = "warn"
unused_qualifications = "warn"

# Keep alphabetically sorted
[workspace.lints.clippy]
cloned_instead_of_copied = "warn"
dbg_macro = "warn"
str_to_string = "warn"

[package]
name = "conduit"
description = "A Matrix homeserver written in Rust"
license = "Apache-2.0"
authors = ["timokoesters <timo@koesters.xyz>"]
homepage = "https://conduit.rs"
repository = "https://gitlab.com/famedly/conduit"
readme = "README.md"
version = "0.7.0-alpha"
description = "A Matrix homeserver written in Rust"
edition = "2021"
homepage = "https://conduit.rs"
license = "Apache-2.0"
name = "conduit"
readme = "README.md"
repository = "https://gitlab.com/famedly/conduit"
version = "0.10.0-alpha"

# See also `rust-toolchain.toml`
rust-version = "1.75.0"
rust-version = "1.79.0"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

@@ -30,16 +28,24 @@ workspace = true

[dependencies]
# Web framework
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
axum-server = { version = "0.5.1", features = ["tls-rustls"] }
axum = { version = "0.7", default-features = false, features = [
    "form",
    "http1",
    "http2",
    "json",
    "matched-path",
], optional = true }
axum-extra = { version = "0.9", features = ["typed-header"] }
axum-server = { version = "0.6", features = ["tls-rustls"] }
tower = { version = "0.4.13", features = ["util"] }
tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] }

# Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
tower-http = { version = "0.5", features = [
    "add-extension",
    "cors",
    "sensitive-headers",
    "trace",
    "util",
] }
tower-service = "0.3"

# Async runtime and utilities
tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }

@@ -50,9 +56,9 @@ persy = { version = "1.4.4", optional = true, features = ["background_ops"] }

# Used for the http request / response body type for Ruma endpoints used with reqwest
bytes = "1.4.0"
http = "0.2.9"
http = "1"
# Used to find data directory for default db path
directories = "4.0.1"
directories = "5"
# Used for ruma wrapper
serde_json = { version = "1.0.96", features = ["raw_value"] }
# Used for appservice registration files

@@ -62,34 +68,51 @@ serde = { version = "1.0.163", features = ["rc"] }

# Used for secure identifiers
rand = "0.8.5"
# Used to hash passwords
rust-argon2 = "1.0.0"
rust-argon2 = "2"
# Used to send requests
hyper = "0.14.26"
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
hyper = "1.1"
hyper-util = { version = "0.1", features = [
    "client",
    "client-legacy",
    "http1",
    "http2",
] }
reqwest = { version = "0.12", default-features = false, features = [
    "rustls-tls-native-roots",
    "socks",
] }
# Used for conduit::Error type
thiserror = "1.0.40"
# Used to generate thumbnails for images
image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] }
image = { version = "0.25", default-features = false, features = [
    "gif",
    "jpeg",
    "png",
] }
# Used to encode server public key
base64 = "0.21.2"
base64 = "0.22"
# Used when hashing the state
ring = "0.17.7"
# Used when querying the SRV record of other servers
trust-dns-resolver = "0.22.0"
hickory-resolver = "0.24"
# Used to find matching events for appservices
regex = "1.8.1"
# jwt jsonwebtokens
jsonwebtoken = "9.2.0"
# Performance measurements
tracing = { version = "0.1.37", features = [] }
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
opentelemetry = "0.22"
opentelemetry-jaeger-propagator = "0.1"
opentelemetry-otlp = "0.15"
opentelemetry_sdk = { version = "0.22", features = ["rt-tokio"] }
tracing = "0.1.37"
tracing-flame = "0.2.0"
opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
tracing-opentelemetry = "0.18.0"
tracing-opentelemetry = "0.23"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }

lru-cache = "0.1.2"
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
parking_lot = { version = "0.12.1", optional = true }
rusqlite = { version = "0.31", optional = true, features = ["bundled"] }

# crossbeam = { version = "0.8.2", optional = true }
num_cpus = "1.15.0"
threadpool = "1.8.1"

@@ -102,44 +125,66 @@ thread_local = "1.1.7"

hmac = "0.12.1"
sha-1 = "0.10.1"
# used for conduit's CLI and admin room command parsing
clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
clap = { version = "4.3.0", default-features = false, features = [
    "derive",
    "error-context",
    "help",
    "std",
    "string",
    "usage",
] }
futures-util = { version = "0.3.28", default-features = false }
# Used for reading the configuration from conduit.toml & environment variables
figment = { version = "0.10.8", features = ["env", "toml"] }

tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true }
tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
lazy_static = "1.4.0"
async-trait = "0.1.68"
# Validating urls in config
url = { version = "2", features = ["serde"] }

# Used to make working with iterators easier, was already a transitive depdendency
itertools = "0.12"
async-trait = "0.1.68"
tikv-jemallocator = { version = "0.5.0", features = [
    "unprefixed_malloc_on_supported_platforms",
], optional = true }

sd-notify = { version = "0.4.1", optional = true }

[dependencies.rocksdb]
package = "rust-rocksdb"
version = "0.22.7"
optional = true
# Used for matrix spec type definitions and helpers
[dependencies.ruma]
features = [
    "multi-threaded-cf",
    "zstd",
    "lz4",
    "appservice-api-c",
    "client-api",
    "compat",
    "federation-api",
    "push-gateway-api-c",
    "rand",
    "ring-compat",
    "server-util",
    "state-res",
    "unstable-exhaustive-types",
    "unstable-msc2448",
    "unstable-msc3575",
    "unstable-unspecified",
]
git = "https://github.com/ruma/ruma"

[dependencies.rocksdb]
features = ["lz4", "multi-threaded-cf", "zstd"]
optional = true
package = "rust-rocksdb"
version = "0.25"

[target.'cfg(unix)'.dependencies]
nix = { version = "0.28", features = ["resource"] }

[features]
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
default = ["backend_rocksdb", "backend_sqlite", "conduit_bin", "systemd"]
#backend_sled = ["sled"]
backend_persy = ["persy", "parking_lot"]
backend_persy = ["parking_lot", "persy"]
backend_sqlite = ["sqlite"]
#backend_heed = ["heed", "crossbeam"]
backend_rocksdb = ["rocksdb"]
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
conduit_bin = ["axum"]
jemalloc = ["tikv-jemallocator"]
sqlite = ["parking_lot", "rusqlite", "tokio/signal"]
systemd = ["sd-notify"]

[[bin]]

@@ -152,35 +197,45 @@ name = "conduit"

path = "src/lib.rs"

[package.metadata.deb]
name = "matrix-conduit"
maintainer = "Paul van Tilburg <paul@luon.net>"
assets = [
    [
        "README.md",
        "usr/share/doc/matrix-conduit/",
        "644",
    ],
    [
        "debian/README.md",
        "usr/share/doc/matrix-conduit/README.Debian",
        "644",
    ],
    [
        "target/release/conduit",
        "usr/sbin/matrix-conduit",
        "755",
    ],
]
conf-files = ["/etc/matrix-conduit/conduit.toml"]
copyright = "2020, Timo Kösters <timo@koesters.xyz>"
license-file = ["LICENSE", "3"]
depends = "$auto, ca-certificates"
extended-description = """\
A fast Matrix homeserver that is optimized for smaller, personal servers, \
instead of a server that has high scalability."""
section = "net"
priority = "optional"
assets = [
    ["debian/README.md", "usr/share/doc/matrix-conduit/README.Debian", "644"],
    ["README.md", "usr/share/doc/matrix-conduit/", "644"],
    ["target/release/conduit", "usr/sbin/matrix-conduit", "755"],
]
conf-files = [
    "/etc/matrix-conduit/conduit.toml"
]
license-file = ["LICENSE", "3"]
maintainer = "Paul van Tilburg <paul@luon.net>"
maintainer-scripts = "debian/"
name = "matrix-conduit"
priority = "optional"
section = "net"
systemd-units = { unit-name = "matrix-conduit" }

[profile.dev]
lto = 'off'
incremental = true
lto = 'off'

[profile.release]
lto = 'thin'
codegen-units = 32
incremental = true
codegen-units=32
lto = 'thin'
# If you want to make flamegraphs, enable debug info:
# debug = true
@@ -56,6 +56,13 @@ If you have any questions, feel free to
- Send an direct message to `@timokoesters:fachschaften.org` on Matrix
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)

#### Security

If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs)
and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to
[conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before
a fix is released publically.

#### Thanks to

Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
@@ -2,25 +2,39 @@
set -euo pipefail

# The first argument must be the desired installable
INSTALLABLE="$1"

# Build the installable and forward any other arguments too
nix build "$@"
# Build the installable and forward any other arguments too. Also, use
# nix-output-monitor instead if it's available.
if command -v nom &> /dev/null; then
    nom build "$@"
else
    nix build "$@"
fi

if [ ! -z ${ATTIC_TOKEN+x} ]; then
    nix run --inputs-from . attic -- \
        login \
        conduit \
        "${ATTIC_ENDPOINT:-https://nix.computer.surgery/conduit}" \
        "${ATTIC_ENDPOINT:-https://attic.conduit.rs/conduit}" \
        "$ATTIC_TOKEN"

    # Push the target installable and its build dependencies
    nix run --inputs-from . attic -- \
        push \
        conduit \
        "$(nix path-info "$INSTALLABLE" --derivation)" \
        "$(nix path-info "$INSTALLABLE")"
    readarray -t derivations < <(nix path-info "$@" --derivation)
    for derivation in "${derivations[@]}"; do
        cache+=(
            "$(nix-store --query --requisites --include-outputs "$derivation")"
        )
    done

    # Upload them to Attic
    #
    # Use `xargs` and a here-string because something would probably explode if
    # several thousand arguments got passed to a command at once. Hopefully no
    # store paths include a newline in them.
    (
        IFS=$'\n'
        nix shell --inputs-from . attic -c xargs \
            attic push conduit <<< "${cache[*]}"
    )

else
    echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
fi
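For context, this script is invoked from the CI configuration shown earlier in this diff. A typical invocation looks like the sketch below; the installable names are taken from the `.gitlab-ci.yml` hunks above, and `ATTIC_TOKEN` is only needed if you actually want the upload step to run.

```bash
# Build a flake output and, if ATTIC_TOKEN is set, push it and its closure to Attic.
export ATTIC_TOKEN="..."   # hypothetical token value; omit to skip the upload
./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
# The attic client itself is cached the same way in CI:
./bin/nix-build-and-cache --inputs-from . attic
```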
@@ -1,18 +1,21 @@
[book]
title = "Conduit"
description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol"
language = "en"
multilingual = false
src = "docs"
title = "Conduit"

[build]
build-dir = "public"
create-missing = true

[output.html]
git-repository-url = "https://gitlab.com/famedly/conduit"
edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}"
git-repository-icon = "fa-git-square"
git-repository-url = "https://gitlab.com/famedly/conduit"

[output.html.search]
limit-results = 15

[output.html.code.hidelines]
json = "~"
@@ -1,4 +1,4 @@
FROM rust:1.75.0
FROM rust:1.79.0

WORKDIR /workdir
@@ -17,14 +17,14 @@
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
# and
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
# for more information
# for more information, or continue below to see how conduit can do this for you.

# YOU NEED TO EDIT THIS
#server_name = "your.server.name"

database_backend = "rocksdb"
# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/"
database_backend = "rocksdb"

# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port

@@ -38,8 +38,14 @@ max_request_size = 20_000_000 # in bytes
# Enables registration. If set to false, no users can register on this server.
allow_registration = true

allow_federation = true
# A static registration token that new users will have to provide when creating
# an account. YOU NEED TO EDIT THIS.
# - Insert a password that users will have to enter on registration
# - Start the line with '#' to remove the condition
registration_token = ""

allow_check_for_updates = true
allow_federation = true

# Enable the display name lightning bolt on registration.
enable_lightning_bolt = true

@@ -59,3 +65,10 @@ trusted_servers = ["matrix.org"]
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.

[global.well_known]
# Conduit handles the /.well-known/matrix/* endpoints, making both clients and servers try to access conduit with the host
# server_name and port 443 by default.
# If you want to override these defaults, uncomment and edit the following lines accordingly:
#server = your.server.name:443
#client = https://your.server.name
debian/postinst (vendored, 18 changed lines)
@@ -72,12 +72,30 @@ max_request_size = 20_000_000 # in bytes
# Enables registration. If set to false, no users can register on this server.
allow_registration = true

# A static registration token that new users will have to provide when creating
# an account.
# - Insert a password that users will have to enter on registration
# - Start the line with '#' to remove the condition
#registration_token = ""

allow_federation = true
allow_check_for_updates = true

# Enable the display name lightning bolt on registration.
enable_lightning_bolt = true

# Servers listed here will be used to gather public keys of other servers.
# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
# support batched key requests, so this list should only contain Synapse
# servers.)
trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time

# Controls the log verbosity. See also [here][0].
#
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
#log = "..."
EOF
fi
;;
@@ -2,7 +2,8 @@
- [Introduction](introduction.md)

- [Example configuration](configuration.md)
- [Configuration](configuration.md)
- [Delegation](delegation.md)
- [Deploying](deploying.md)
  - [Generic](deploying/generic.md)
  - [Debian](deploying/debian.md)

@@ -10,3 +11,4 @@
  - [NixOS](deploying/nixos.md)
- [TURN](turn.md)
- [Appservices](appservices.md)
- [FAQ](faq.md)
@@ -1,5 +1,114 @@
# Example configuration
# Configuration

``` toml
{{#include ../conduit-example.toml}}
**Conduit** is configured using a TOML file. The configuration file is loaded from the path specified by the `CONDUIT_CONFIG` environment variable.

> **Note:** The configuration file is required to run Conduit. If the `CONDUIT_CONFIG` environment variable is not set, Conduit will exit with an error.

> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect

> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. Example: `CONDUIT_SERVER_NAME="example.org"`

Conduit's configuration file is divided into the following sections:

- [Global](#global)
  - [TLS](#tls)
  - [Proxy](#proxy)

## Global

The `global` section contains the following fields:

> **Note:** The `*` symbol indicates that the field is required, and the values in **parentheses** are the possible values

| Field | Type | Description | Default |
| --- | --- | --- | --- |
| `address` | `string` | The address to bind to | `"127.0.0.1"` |
| `port` | `integer` | The port to bind to | `8000` |
| `tls` | `table` | See the [TLS configuration](#tls) | N/A |
| `server_name`_*_ | `string` | The server name | N/A |
| `database_backend`_*_ | `string` | The database backend to use (`"rocksdb"` *recommended*, `"sqlite"`) | N/A |
| `database_path`_*_ | `string` | The path to the database file/dir | N/A |
| `db_cache_capacity_mb` | `float` | The cache capacity, in MB | `300.0` |
| `enable_lightning_bolt` | `boolean` | Add `⚡️` emoji to end of user's display name | `true` |
| `allow_check_for_updates` | `boolean` | Allow Conduit to check for updates | `true` |
| `conduit_cache_capacity_modifier` | `float` | The value to multiply the default cache capacity by | `1.0` |
| `rocksdb_max_open_files` | `integer` | The maximum number of open files | `1000` |
| `pdu_cache_capacity` | `integer` | The maximum number of Persisted Data Units (PDUs) to cache | `150000` |
| `cleanup_second_interval` | `integer` | How often conduit should clean up the database, in seconds | `60` |
| `max_request_size` | `integer` | The maximum request size, in bytes | `20971520` (20 MiB) |
| `max_concurrent_requests` | `integer` | The maximum number of concurrent requests | `100` |
| `max_fetch_prev_events` | `integer` | The maximum number of previous events to fetch per request if conduit notices events are missing | `100` |
| `allow_registration` | `boolean` | Opens your homeserver to public registration | `false` |
| `registration_token` | `string` | The token users need to have when registering to your homeserver | N/A |
| `allow_encryption` | `boolean` | Allow users to enable encryption in their rooms | `true` |
| `allow_federation` | `boolean` | Allow federation with other servers | `true` |
| `allow_room_creation` | `boolean` | Allow users to create rooms | `true` |
| `allow_unstable_room_versions` | `boolean` | Allow users to create and join rooms with unstable versions | `true` |
| `default_room_version` | `string` | The default room version (`"6"`-`"10"`)| `"10"` |
| `allow_jaeger` | `boolean` | Allow Jaeger tracing | `false` |
| `tracing_flame` | `boolean` | Enable flame tracing | `false` |
| `proxy` | `table` | See the [Proxy configuration](#proxy) | N/A |
| `jwt_secret` | `string` | The secret used in the JWT to enable JWT login without it a 400 error will be returned | N/A |
| `trusted_servers` | `array` | The list of trusted servers to gather public keys of offline servers | `["matrix.org"]` |
| `log` | `string` | The log verbosity to use | `"warn"` |
| `turn_username` | `string` | The TURN username | `""` |
| `turn_password` | `string` | The TURN password | `""` |
| `turn_uris` | `array` | The TURN URIs | `[]` |
| `turn_secret` | `string` | The TURN secret | `""` |
| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` |
| `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A |
| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |

### TLS
The `tls` table contains the following fields:
- `certs`: The path to the public PEM certificate
- `key`: The path to the PEM private key

#### Example
```toml
[global.tls]
certs = "/path/to/cert.pem"
key = "/path/to/key.pem"
```

### Proxy
You can choose what requests conduit should proxy (if any). The `proxy` table contains the following fields

#### Global
The global option will proxy all outgoing requests. The `global` table contains the following fields:
- `url`: The URL of the proxy server
##### Example
```toml
[global.proxy.global]
url = "https://example.com"
```

#### By domain
An array of tables that contain the following fields:
- `url`: The URL of the proxy server
- `include`: Domains that should be proxied (assumed to be `["*"]` if unset)
- `exclude`: Domains that should not be proxied (takes precedent over `include`)

Both `include` and `exclude` allow for glob pattern matching.
##### Example
In this example, all requests to domains ending in `.onion` and `matrix.secretly-an-onion-domain.xyz`
will be proxied via `socks://localhost:9050`, except for domains ending in `.myspecial.onion`. You can add as many `by_domain` tables as you need.
```toml
[[global.proxy.by_domain]]
url = "socks5://localhost:9050"
include = ["*.onion", "matrix.secretly-an-onion-domain.xyz"]
exclude = ["*.clearnet.onion"]
```

### Example

> **Note:** The following example is a minimal configuration file. You should replace the values with your own.

```toml
[global]
{{#include ../conduit-example.toml:22:}}
```
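To make the environment-variable note in the configuration page above concrete, here is a minimal sketch of configuring Conduit through `CONDUIT_*` variables alone. Only `CONDUIT_SERVER_NAME` is given as an example in the text itself; the other variable names and the nested-table mapping are assumptions derived from the field names in the table above, and the empty `CONDUIT_CONFIG` trick comes from the Docker notes later in this diff.

```bash
#!/usr/bin/env bash
# Minimal sketch: configuring Conduit purely via environment variables.
# An empty CONDUIT_CONFIG tells Conduit not to look for a TOML file.
export CONDUIT_CONFIG=""
export CONDUIT_SERVER_NAME="example.org"               # example value from the note above
export CONDUIT_DATABASE_BACKEND="rocksdb"
export CONDUIT_DATABASE_PATH="/var/lib/matrix-conduit/"
export CONDUIT_PORT="6167"
export CONDUIT_ADDRESS="127.0.0.1"
# Fields inside a table use a double underscore, e.g. [global.well_known] client
# (assumed mapping, following the CONDUIT_{table_name}__{field_name} rule):
export CONDUIT_WELL_KNOWN__CLIENT="https://matrix.example.org"

./conduit   # path to the Conduit binary; adjust to your install
```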
docs/delegation.md (new file, 69 lines)

@@ -0,0 +1,69 @@
# Delegation

You can run Conduit on a separate domain than the actual server name (what shows up in user ids, aliases, etc.).
For example you can have your users have IDs such as `@foo:example.org` and have aliases like `#bar:example.org`,
while actually having Conduit hosted on the `matrix.example.org` domain. This is called delegation.

## Automatic (recommended)

Conduit has support for hosting delegation files by itself, and by default uses it to serve federation traffic on port 443.

With this method, you need to direct requests to `/.well-known/matrix/*` to Conduit in your reverse proxy.

This is only recommended if Conduit is on the same physical server as the server which serves your server name (e.g. example.org)
as servers don't always seem to cache the response, leading to slower response times otherwise, but it should also work if you
are connected to the server running Conduit using something like a VPN.

> **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration

To configure it, use the following options:
| Field | Type | Description | Default |
| --- | --- | --- | --- |
| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |

### Example

```toml
[global]
well_known_client = "https://matrix.example.org"
well_known_server = "matrix.example.org:443"
```

## Manual

Alternatively you can serve static JSON files to inform clients and servers how to connect to Conduit.

### Servers

For servers to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/server`:

```json
{
  "m.server": "matrix.example.org:443"
}
```
Where `matrix.example.org` is the domain and `443` is the port Conduit is accessible at.

### Clients

For clients to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/client`:
```json
{
  "m.homeserver": {
    "base_url": "https://matrix.example.org"
  }
}
```
Where `matrix.example.org` is the URL Conduit is accessible at.

To ensure that all clients can access this endpoint, it is recommended you set the following headers for this endpoint:
```
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS
Access-Control-Allow-Headers: X-Requested-With, Content-Type, Authorization
```

If you also want to be able to use [sliding sync][0], look [here](faq.md#how-do-i-setup-sliding-sync).

[0]: https://matrix.org/blog/2023/09/matrix-2-0/#sliding-sync
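A quick way to sanity-check the delegation documents described above is a pair of curl requests against the server name domain. The hostnames are the doc's example values (`example.org` for the server name, `matrix.example.org` for the Conduit host); substitute your own.

```bash
# Check the server delegation document (served from the server_name domain):
curl -s https://example.org/.well-known/matrix/server
# expected body: {"m.server": "matrix.example.org:443"}

# Check the client delegation document plus its CORS header:
curl -si https://example.org/.well-known/matrix/client | grep -i -e '^access-control-allow-origin' -e 'base_url'
```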
@@ -7,8 +7,8 @@ services:
    ### then you are ready to go.
    image: matrixconduit/matrix-conduit:latest
    ### If you want to build a fresh image from the sources, then comment the image line and uncomment the
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
    # build:
    #   context: .
    #   args:

@@ -26,8 +26,9 @@ services:
      CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
      CONDUIT_DATABASE_BACKEND: rocksdb
      CONDUIT_PORT: 6167
      CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
      CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONDUIT_ALLOW_REGISTRATION: 'true'
      #CONDUIT_REGISTRATION_TOKEN: '' # require password for registration
      CONDUIT_ALLOW_FEDERATION: 'true'
      CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'

@@ -37,7 +38,7 @@ services:
  # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
  # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
  # and in the docker-compose override file.
  # and in the docker compose override file.
  well-known:
    image: nginx:latest
    restart: unless-stopped
@@ -18,7 +18,7 @@ services:
  # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
  # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
  # and in the docker-compose file.
  # and in the docker compose file.
  well-known:
    labels:
      - "traefik.enable=true"
@@ -7,8 +7,8 @@ services:
    ### then you are ready to go.
    image: matrixconduit/matrix-conduit:latest
    ### If you want to build a fresh image from the sources, then comment the image line and uncomment the
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
    # build:
    #   context: .
    #   args:

@@ -31,19 +31,18 @@ services:
      ### Uncomment and change values as desired
      # CONDUIT_ADDRESS: 0.0.0.0
      # CONDUIT_PORT: 6167
      # CONDUIT_REGISTRATION_TOKEN: '' # require password for registration
      # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
      # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
      # CONDUIT_ALLOW_JAEGER: 'false'
      # CONDUIT_ALLOW_ENCRYPTION: 'true'
      # CONDUIT_ALLOW_FEDERATION: 'true'
      # CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
      # CONDUIT_WORKERS: 10
      # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
      # CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB

  # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
  # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
  # and in the docker-compose override file.
  # and in the docker compose override file.
  well-known:
    image: nginx:latest
    restart: unless-stopped
@@ -7,8 +7,8 @@ services:
    ### then you are ready to go.
    image: matrixconduit/matrix-conduit:latest
    ### If you want to build a fresh image from the sources, then comment the image line and uncomment the
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
    # build:
    #   context: .
    #   args:

@@ -26,7 +26,7 @@ services:
      CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
      CONDUIT_DATABASE_BACKEND: rocksdb
      CONDUIT_PORT: 6167
      CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
      CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONDUIT_ALLOW_REGISTRATION: 'true'
      CONDUIT_ALLOW_FEDERATION: 'true'
      CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
@@ -61,13 +61,14 @@ docker run -d -p 8448:6167 \
-e CONDUIT_DATABASE_BACKEND="rocksdb" \
-e CONDUIT_ALLOW_REGISTRATION=true \
-e CONDUIT_ALLOW_FEDERATION=true \
-e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
-e CONDUIT_MAX_REQUEST_SIZE="20000000" \
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
-e CONDUIT_PORT="6167" \
--name conduit <link>
```

or you can use [docker-compose](#docker-compose).
or you can use [docker compose](#docker-compose).

The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../configuration.md).
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need

@@ -75,9 +76,9 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible

If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.

### Docker-compose
### Docker compose

If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.
If the `docker run` command is not for you or your setup, you can also use one of the provided `docker compose` files.

Depending on your proxy setup, you can use one of the following files;
- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)

@@ -91,10 +92,10 @@ Additional info about deploying Conduit can be found [here](generic.md).

### Build

To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with:
To build the Conduit image with docker compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker compose with:

```bash
docker-compose up
docker compose up
```

This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag.

@@ -104,7 +105,7 @@ This will also start the container right afterwards, so if want it to run in det

If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with:

```bash
docker-compose up -d
docker compose up -d
```

> **Note:** Don't forget to modify and adjust the compose file to your needs.

@@ -157,7 +158,7 @@ So...step by step:
   }
   ```

6. Run `docker-compose up -d`
6. Run `docker compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.

@@ -196,8 +197,8 @@ Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using
docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
```

or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml`
and run `docker-compose up -d` in the same directory.
or docker compose. For the latter, paste the following section into a file called `docker-compose.yml`
and run `docker compose up -d` in the same directory.

```yml
version: 3
@@ -10,31 +10,28 @@
Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore
only offer Linux binaries.

You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url:
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. For `arm`, you should use `aarch`. Now copy the appropriate url:

**Stable versions:**
**Stable/Main versions:**

| CPU Architecture | Download stable version |
| ------------------------------------------- | --------------------------------------------------------------- |
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] |
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] |
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] |
| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) |
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-arm64v8.tar.gz?job=artifacts) |

These builds were created on and linked against the glibc version shipped with Debian bullseye.
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.

[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master

**Latest versions:**
**Latest/Next versions:**

| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |
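As a concrete example of using the download tables above, the sketch below fetches the statically linked x86_64 binary from the `next` branch. The URL is copied verbatim from the table; pick the row that matches your `uname -m` output instead if it differs.

```bash
# 1. Find your CPU architecture (x86_64 or aarch64):
uname -m

# 2. Download the matching static binary ("next" branch, x86_64 shown here):
curl -L -o conduit \
  "https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts"
chmod +x conduit
```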
docs/faq.md (new file, 41 lines)

@@ -0,0 +1,41 @@
# FAQ

Here are some of the most frequently asked questions about Conduit, and their answers.

## Why do I get a `M_INCOMPATIBLE_ROOM_VERSION` error when trying to join some rooms?

Conduit doesn't support room versions 1 and 2 at all, and doesn't properly support versions 3-5 currently. You can track the progress of adding support [here](https://gitlab.com/famedly/conduit/-/issues/433).

## How do I backup my server?

To backup your Conduit server, it's very easy.
You can simply stop Conduit, make a copy or file system snapshot of the database directory, then start Conduit again.

> **Note**: When using a file system snapshot, it is not required that you stop the server, but it is still recommended as it is the safest option and should ensure your database is not left in an inconsistent state.

## How do I setup sliding sync?

If you use the [automatic method for delegation](delegation.md#automatic-recommended) or just proxy `.well-known/matrix/client` to Conduit, sliding sync should work with no extra configuration.
If you don't, continue below.

You need to add a `org.matrix.msc3575.proxy` field to your `.well-known/matrix/client` response which contains a url which Conduit is accessible behind.
Here is an example:
```json
{
~ "m.homeserver": {
~   "base_url": "https://matrix.example.org"
~ },
  "org.matrix.msc3575.proxy": {
    "url": "https://matrix.example.org"
  }
}
```

## Can I migrate from Synapse to Conduit?

Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically.
Rooms that were federated can be re-joined via the other participating servers, however media and the like may be deleted from remote servers after some time, and hence might not be recoverable.

## How do I make someone an admin?

Simply invite them to the admin room. Once joined, they can administer the server by interacting with the `@conduit:<server_name>` user.
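The backup answer above, written out as a sketch. The database directory is the `database_path` from the example config earlier in this diff, and the unit name is taken from the Debian packaging (`systemd-units = { unit-name = "matrix-conduit" }` in `Cargo.toml`); substitute whatever path and service name your deployment actually uses.

```bash
# Stop Conduit, copy the database directory, then start it again.
sudo systemctl stop matrix-conduit
sudo cp -a /var/lib/matrix-conduit/ "/backup/matrix-conduit-$(date +%F)"
sudo systemctl start matrix-conduit
```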
@@ -1,8 +1,8 @@
# Setting up TURN/STURN
# Setting up TURN/STUN

## General instructions

* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).
* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/element-hq/synapse/blob/develop/docs/turn-howto.md).

## Edit/Add a few settings to your existing conduit.toml
engage.toml (29 changed lines)
@@ -1,48 +1,48 @@
interpreter = ["bash", "-euo", "pipefail", "-c"]

[[task]]
name = "engage"
group = "versions"
name = "engage"
script = "engage --version"

[[task]]
name = "rustc"
group = "versions"
name = "rustc"
script = "rustc --version"

[[task]]
name = "cargo"
group = "versions"
name = "cargo"
script = "cargo --version"

[[task]]
name = "cargo-fmt"
group = "versions"
name = "cargo-fmt"
script = "cargo fmt --version"

[[task]]
name = "rustdoc"
group = "versions"
name = "rustdoc"
script = "rustdoc --version"

[[task]]
name = "cargo-clippy"
group = "versions"
name = "cargo-clippy"
script = "cargo clippy -- --version"

[[task]]
name = "lychee"
group = "versions"
name = "lychee"
script = "lychee --version"

[[task]]
name = "cargo-fmt"
group = "lints"
name = "cargo-fmt"
script = "cargo fmt --check -- --color=always"

[[task]]
name = "cargo-doc"
group = "lints"
name = "cargo-doc"
script = """
RUSTDOCFLAGS="-D warnings" cargo doc \
    --workspace \

@@ -52,18 +52,23 @@ RUSTDOCFLAGS="-D warnings" cargo doc \
"""

[[task]]
name = "cargo-clippy"
group = "lints"
name = "cargo-clippy"
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"

[[task]]
name = "lychee"
group = "lints"
name = "taplo-fmt"
script = "taplo fmt --check --colors always"

[[task]]
group = "lints"
name = "lychee"
script = "lychee --offline docs"

[[task]]
name = "cargo"
group = "tests"
name = "cargo"
script = """
cargo test \
    --workspace \
flake.lock (generated, 8 changed lines)
@@ -51,17 +51,17 @@
      ]
    },
    "locked": {
      "lastModified": 1707685877,
      "narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=",
      "lastModified": 1713721181,
      "narHash": "sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU=",
      "owner": "ipetkov",
      "repo": "crane",
      "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
      "rev": "55f4939ac59ff8f89c6a4029730a2d49ea09105f",
      "type": "github"
    },
    "original": {
      "owner": "ipetkov",
      "ref": "master",
      "repo": "crane",
      "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
      "type": "github"
    }
  },
flake.nix (361 changed lines)
@@ -13,304 +13,103 @@
    inputs.nixpkgs.follows = "nixpkgs";
  };
  crane = {
    # Pin latest crane that's not affected by the following bugs:
    #
    # * <https://github.com/ipetkov/crane/issues/527#issuecomment-1978079140>
    # * <https://github.com/toml-rs/toml/issues/691>
    # * <https://github.com/toml-rs/toml/issues/267>
    url = "github:ipetkov/crane?rev=2c653e4478476a52c6aa3ac0495e4dea7449ea0e";
    url = "github:ipetkov/crane?ref=master";
    inputs.nixpkgs.follows = "nixpkgs";
  };
  attic.url = "github:zhaofengli/attic?ref=main";
};

outputs =
  { self
  , nixpkgs
  , flake-utils
  , nix-filter

  , fenix
  , crane
  , ...
  }: flake-utils.lib.eachDefaultSystem (system:
outputs = inputs:
  let
    pkgsHost = nixpkgs.legacyPackages.${system};
    # Keep sorted
    mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
      craneLib =
        (inputs.crane.mkLib pkgs).overrideToolchain self.toolchain;

      # Nix-accessible `Cargo.toml`
      cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
      default = self.callPackage ./nix/pkgs/default {};

      # The Rust toolchain to use
      toolchain = fenix.packages.${system}.fromToolchainFile {
        file = ./rust-toolchain.toml;
      inherit inputs;

        # See also `rust-toolchain.toml`
        sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8=";
      };
      oci-image = self.callPackage ./nix/pkgs/oci-image {};

      builder = pkgs:
        ((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;
      book = self.callPackage ./nix/pkgs/book {};

      nativeBuildInputs = pkgs: [
        # bindgen needs the build platform's libclang. Apparently due to
        # "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
        # quite do the right thing here.
        pkgs.pkgsBuildHost.rustPlatform.bindgenHook
      ];
      rocksdb =
        let
          version = "9.1.1";
        in
        pkgs.rocksdb.overrideAttrs (old: {
          inherit version;
          src = pkgs.fetchFromGitHub {
            owner = "facebook";
            repo = "rocksdb";
            rev = "v${version}";
            hash = "sha256-/Xf0bzNJPclH9IP80QNaABfhj4IAR5LycYET18VFCXc=";
          };
        });

      rocksdb' = pkgs:
        let
          version = "8.11.3";
        in
        pkgs.rocksdb.overrideAttrs (old: {
          inherit version;
          src = pkgs.fetchFromGitHub {
            owner = "facebook";
            repo = "rocksdb";
            rev = "v${version}";
            hash = "sha256-OpEiMwGxZuxb9o3RQuSrwZMQGLhe9xLT1aa3HpI4KPs=";
          };
      shell = self.callPackage ./nix/shell.nix {};

      # The Rust toolchain to use
      toolchain = inputs
        .fenix
        .packages
        .${pkgs.pkgsBuildHost.system}
        .fromToolchainFile {
          file = ./rust-toolchain.toml;

          # See also `rust-toolchain.toml`
          sha256 = "sha256-Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU=";
        };
    });
  in
  inputs.flake-utils.lib.eachDefaultSystem (system:
    let
      pkgs = inputs.nixpkgs.legacyPackages.${system};
    in
    {
      packages = {
        default = (mkScope pkgs).default;
        oci-image = (mkScope pkgs).oci-image;
        book = (mkScope pkgs).book;
      }
      //
      builtins.listToAttrs
        (builtins.concatLists
          (builtins.map
            (crossSystem:
              let
                binaryName = "static-${crossSystem}";
                pkgsCrossStatic =
                  (import inputs.nixpkgs {
                    inherit system;
                    crossSystem = {
                      config = crossSystem;
                    };
                  }).pkgsStatic;
              in
              [
                # An output for a statically-linked binary
                {
                  name = binaryName;
                  value = (mkScope pkgsCrossStatic).default;
                }

      env = pkgs: {
        ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include";
        ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib";
      }
      // pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
        ROCKSDB_STATIC = "";
      }
      // {
        CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
          lib.concatStringsSep " " ([]
            ++ lib.optionals
              # This disables PIE for static builds, which isn't great in terms
              # of security. Unfortunately, my hand is forced because nixpkgs'
              # `libstdc++.a` is built without `-fPIE`, which precludes us from
              # leaving PIE enabled.
              stdenv.hostPlatform.isStatic
              ["-C" "relocation-model=static"]
            ++ lib.optionals
              (stdenv.buildPlatform.config != stdenv.hostPlatform.config)
              ["-l" "c"]
            ++ lib.optionals
|
||||
# This check has to match the one [here][0]. We only need to set
|
||||
# these flags when using a different linker. Don't ask me why,
|
||||
# though, because I don't know. All I know is it breaks otherwise.
|
||||
#
|
||||
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
|
||||
(
|
||||
# Nixpkgs doesn't check for x86_64 here but we do, because I
|
||||
# observed a failure building statically for x86_64 without
|
||||
# including it here. Linkers are weird.
|
||||
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
|
||||
&& stdenv.hostPlatform.isStatic
|
||||
&& !stdenv.isDarwin
|
||||
&& !stdenv.cc.bintools.isLLVM
|
||||
# An output for an OCI image based on that binary
|
||||
{
|
||||
name = "oci-image-${crossSystem}";
|
||||
value = (mkScope pkgsCrossStatic).oci-image;
|
||||
}
|
||||
]
|
||||
)
|
||||
[
|
||||
"-l"
|
||||
"stdc++"
|
||||
"-L"
|
||||
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
# What follows is stolen from [here][0]. Its purpose is to properly
|
||||
# configure compilers and linkers for various stages of the build, and
|
||||
# even covers the case of build scripts that need native code compiled and
|
||||
# run on the build platform (I think).
|
||||
#
|
||||
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
|
||||
// (
|
||||
let
|
||||
inherit (pkgs.rust.lib) envVars;
|
||||
in
|
||||
pkgs.lib.optionalAttrs
|
||||
(pkgs.stdenv.targetPlatform.rust.rustcTarget
|
||||
!= pkgs.stdenv.hostPlatform.rust.rustcTarget)
|
||||
(
|
||||
let
|
||||
inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
|
||||
in
|
||||
{
|
||||
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
|
||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
|
||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
|
||||
envVars.linkerForTarget;
|
||||
}
|
||||
)
|
||||
// (
|
||||
let
|
||||
inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
|
||||
in
|
||||
{
|
||||
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
|
||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
|
||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
|
||||
CARGO_BUILD_TARGET = rustcTarget;
|
||||
}
|
||||
)
|
||||
// (
|
||||
let
|
||||
inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
|
||||
in
|
||||
{
|
||||
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
|
||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
|
||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
|
||||
HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc";
|
||||
HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++";
|
||||
}
|
||||
));
|
||||
|
||||
package = pkgs: builder pkgs {
|
||||
src = nix-filter {
|
||||
root = ./.;
|
||||
include = [
|
||||
"src"
|
||||
"Cargo.toml"
|
||||
"Cargo.lock"
|
||||
];
|
||||
};
|
||||
|
||||
# This is redundant with CI
|
||||
doCheck = false;
|
||||
|
||||
env = env pkgs;
|
||||
nativeBuildInputs = nativeBuildInputs pkgs;
|
||||
|
||||
meta.mainProgram = cargoToml.package.name;
|
||||
};
|
||||
|
||||
mkOciImage = pkgs: package:
|
||||
pkgs.dockerTools.buildImage {
|
||||
name = package.pname;
|
||||
tag = "next";
|
||||
copyToRoot = [
|
||||
pkgs.dockerTools.caCertificates
|
||||
];
|
||||
config = {
|
||||
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
|
||||
# are handled as expected
|
||||
Entrypoint = [
|
||||
"${pkgs.lib.getExe' pkgs.tini "tini"}"
|
||||
"--"
|
||||
];
|
||||
Cmd = [
|
||||
"${pkgs.lib.getExe package}"
|
||||
];
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
packages = {
|
||||
default = package pkgsHost;
|
||||
oci-image = mkOciImage pkgsHost self.packages.${system}.default;
|
||||
|
||||
book =
|
||||
let
|
||||
package = self.packages.${system}.default;
|
||||
in
|
||||
pkgsHost.stdenv.mkDerivation {
|
||||
pname = "${package.pname}-book";
|
||||
version = package.version;
|
||||
|
||||
src = nix-filter {
|
||||
root = ./.;
|
||||
include = [
|
||||
"book.toml"
|
||||
"conduit-example.toml"
|
||||
"README.md"
|
||||
"debian/README.md"
|
||||
"docs"
|
||||
];
|
||||
};
|
||||
|
||||
nativeBuildInputs = (with pkgsHost; [
|
||||
mdbook
|
||||
]);
|
||||
|
||||
buildPhase = ''
|
||||
mdbook build
|
||||
mv public $out
|
||||
'';
|
||||
};
|
||||
}
|
||||
//
|
||||
builtins.listToAttrs
|
||||
(builtins.concatLists
|
||||
(builtins.map
|
||||
(crossSystem:
|
||||
let
|
||||
binaryName = "static-${crossSystem}";
|
||||
pkgsCrossStatic =
|
||||
(import nixpkgs {
|
||||
inherit system;
|
||||
crossSystem = {
|
||||
config = crossSystem;
|
||||
};
|
||||
}).pkgsStatic;
|
||||
in
|
||||
[
|
||||
# An output for a statically-linked binary
|
||||
{
|
||||
name = binaryName;
|
||||
value = package pkgsCrossStatic;
|
||||
}
|
||||
|
||||
# An output for an OCI image based on that binary
|
||||
{
|
||||
name = "oci-image-${crossSystem}";
|
||||
value = mkOciImage
|
||||
pkgsCrossStatic
|
||||
self.packages.${system}.${binaryName};
|
||||
}
|
||||
"x86_64-unknown-linux-musl"
|
||||
"aarch64-unknown-linux-musl"
|
||||
]
|
||||
)
|
||||
[
|
||||
"x86_64-unknown-linux-musl"
|
||||
"aarch64-unknown-linux-musl"
|
||||
]
|
||||
)
|
||||
);
|
||||
);
|
||||
|
||||
devShells.default = pkgsHost.mkShell {
|
||||
env = env pkgsHost // {
|
||||
# Rust Analyzer needs to be able to find the path to default crate
|
||||
# sources, and it can read this environment variable to do so. The
|
||||
# `rust-src` component is required in order for this to work.
|
||||
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
|
||||
};
|
||||
|
||||
# Development tools
|
||||
nativeBuildInputs = nativeBuildInputs pkgsHost ++ [
|
||||
# Always use nightly rustfmt because most of its options are unstable
|
||||
#
|
||||
# This needs to come before `toolchain` in this list, otherwise
|
||||
# `$PATH` will have stable rustfmt instead.
|
||||
fenix.packages.${system}.latest.rustfmt
|
||||
|
||||
toolchain
|
||||
] ++ (with pkgsHost; [
|
||||
engage
|
||||
|
||||
# Needed for producing Debian packages
|
||||
cargo-deb
|
||||
|
||||
# Needed for Complement
|
||||
go
|
||||
olm
|
||||
|
||||
# Needed for our script for Complement
|
||||
jq
|
||||
|
||||
# Needed for finding broken markdown links
|
||||
lychee
|
||||
|
||||
# Useful for editing the book locally
|
||||
mdbook
|
||||
]);
|
||||
};
|
||||
});
|
||||
devShells.default = (mkScope pkgs).shell;
|
||||
}
|
||||
);
|
||||
}
|
||||
|
|
34 nix/pkgs/book/default.nix Normal file

@@ -0,0 +1,34 @@
# Keep sorted
{ default
, inputs
, mdbook
, stdenv
}:

stdenv.mkDerivation {
  pname = "${default.pname}-book";
  version = default.version;


  src = let filter = inputs.nix-filter.lib; in filter {
    root = inputs.self;

    # Keep sorted
    include = [
      "book.toml"
      "conduit-example.toml"
      "debian/README.md"
      "docs"
      "README.md"
    ];
  };

  nativeBuildInputs = [
    mdbook
  ];

  buildPhase = ''
    mdbook build
    mv public $out
  '';
}
|
100 nix/pkgs/default/cross-compilation-env.nix Normal file

@@ -0,0 +1,100 @@
{ lib
, pkgsBuildHost
, rust
, stdenv
}:

lib.optionalAttrs stdenv.hostPlatform.isStatic {
  ROCKSDB_STATIC = "";
}
//
{
  CARGO_BUILD_RUSTFLAGS =
    lib.concatStringsSep
      " "
      ([]
        # This disables PIE for static builds, which isn't great in terms of
        # security. Unfortunately, my hand is forced because nixpkgs'
        # `libstdc++.a` is built without `-fPIE`, which precludes us from
        # leaving PIE enabled.
        ++ lib.optionals
          stdenv.hostPlatform.isStatic
          [ "-C" "relocation-model=static" ]
        ++ lib.optionals
          (stdenv.buildPlatform.config != stdenv.hostPlatform.config)
          [ "-l" "c" ]
        ++ lib.optionals
          # This check has to match the one [here][0]. We only need to set
          # these flags when using a different linker. Don't ask me why, though,
          # because I don't know. All I know is it breaks otherwise.
          #
          # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
          (
            # Nixpkgs doesn't check for x86_64 here but we do, because I
            # observed a failure building statically for x86_64 without
            # including it here. Linkers are weird.
            (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
              && stdenv.hostPlatform.isStatic
              && !stdenv.isDarwin
              && !stdenv.cc.bintools.isLLVM
          )
          [
            "-l"
            "stdc++"
            "-L"
            "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
          ]
      );
}

# What follows is stolen from [here][0]. Its purpose is to properly configure
# compilers and linkers for various stages of the build, and even covers the
# case of build scripts that need native code compiled and run on the build
# platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
//
(
  let
    inherit (rust.lib) envVars;
  in
  lib.optionalAttrs
    (stdenv.targetPlatform.rust.rustcTarget
      != stdenv.hostPlatform.rust.rustcTarget)
    (
      let
        inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
      in
      {
        "CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
        "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
        "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
          envVars.linkerForTarget;
      }
    )
  //
  (
    let
      inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
    in
    {
      "CC_${cargoEnvVarTarget}" = envVars.ccForHost;
      "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
      "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
      CARGO_BUILD_TARGET = rustcTarget;
    }
  )
  //
  (
    let
      inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
    in
    {
      "CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
      "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
      "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
      HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
      HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
    }
  )
)
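
The expression above drives cross compilation purely through environment variables: Cargo reads `CARGO_BUILD_TARGET` and `CARGO_TARGET_<TRIPLE>_LINKER`, and native build scripts pick up the target-prefixed `CC_*`/`CXX_*` values. Below is a minimal, hypothetical `build.rs` sketch of the consumer side; it is not taken from this repository, and it assumes the `cc` crate (which looks up `CC_<triple>` on its own) plus a made-up `src/shim.c`.

```rust
// build.rs: a minimal sketch (not from this repository) of where the variables
// exported by the Nix expression above get consumed. Cargo itself honours
// `CARGO_TARGET_<TRIPLE>_LINKER` and `CARGO_BUILD_TARGET`; the `cc` crate
// consults the target-prefixed `CC_<triple>` / `CXX_<triple>` variables when
// compiling native code for a cross target.
fn main() {
    // `TARGET` is set by Cargo for build scripts; it matches the triple used
    // in the env var names produced above.
    let target = std::env::var("TARGET").expect("cargo sets TARGET");

    // Purely illustrative: compile a hypothetical C shim with the `cc` crate,
    // which picks up CC_<triple> automatically when it is set.
    cc::Build::new()
        .file("src/shim.c") // hypothetical file
        .compile("shim");

    println!("cargo:warning=building native code for {target}");
}
```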
|
95 nix/pkgs/default/default.nix Normal file

@@ -0,0 +1,95 @@
# Dependencies (keep sorted)
{ craneLib
, inputs
, lib
, pkgsBuildHost
, rocksdb
, rust
, stdenv

# Options (keep sorted)
, default-features ? true
, features ? []
, profile ? "release"
}:

let
  buildDepsOnlyEnv =
    let
      rocksdb' = rocksdb.override {
        enableJemalloc = builtins.elem "jemalloc" features;
      };
    in
    {
      NIX_OUTPATH_USED_AS_RANDOM_SEED = "randomseed"; # https://crane.dev/faq/rebuilds-bindgen.html
      ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
      ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
    }
    //
    (import ./cross-compilation-env.nix {
      # Keep sorted
      inherit
        lib
        pkgsBuildHost
        rust
        stdenv;
    });

  buildPackageEnv = {
    CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev;
  } // buildDepsOnlyEnv;

  commonAttrs = {
    inherit
      (craneLib.crateNameFromCargoToml {
        cargoToml = "${inputs.self}/Cargo.toml";
      })
      pname
      version;

    src = let filter = inputs.nix-filter.lib; in filter {
      root = inputs.self;

      # Keep sorted
      include = [
        "Cargo.lock"
        "Cargo.toml"
        "src"
      ];
    };

    nativeBuildInputs = [
      # bindgen needs the build platform's libclang. Apparently due to "splicing
      # weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
      # right thing here.
      pkgsBuildHost.rustPlatform.bindgenHook
    ];

    CARGO_PROFILE = profile;
  };
in

craneLib.buildPackage ( commonAttrs // {
  cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
    env = buildDepsOnlyEnv;
  });

  cargoExtraArgs = "--locked "
    + lib.optionalString
      (!default-features)
      "--no-default-features "
    + lib.optionalString
      (features != [])
      "--features " + (builtins.concatStringsSep "," features);

  # This is redundant with CI
  doCheck = false;

  env = buildPackageEnv;

  passthru = {
    env = buildPackageEnv;
  };

  meta.mainProgram = commonAttrs.pname;
})
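
`buildPackageEnv` injects `CONDUIT_VERSION_EXTRA` (the flake's short revision) into the build environment. The sketch below shows the usual way a Rust crate surfaces such a compile-time variable via `option_env!`; the `version` helper and output format are illustrative assumptions, not code from this diff.

```rust
// Illustrative only: how a compile-time variable like CONDUIT_VERSION_EXTRA is
// commonly surfaced in Rust. `option_env!` captures the variable at build time
// and yields `None` when it was not set (e.g. outside the Nix build). The
// `version` function is a hypothetical helper, not from this diff.
pub fn version() -> String {
    let base = env!("CARGO_PKG_VERSION");
    match option_env!("CONDUIT_VERSION_EXTRA") {
        Some(extra) if !extra.is_empty() => format!("{base} ({extra})"),
        _ => base.to_owned(),
    }
}

fn main() {
    println!("conduit {}", version());
}
```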
|
25 nix/pkgs/oci-image/default.nix Normal file

@@ -0,0 +1,25 @@
# Keep sorted
{ default
, dockerTools
, lib
, tini
}:

dockerTools.buildImage {
  name = default.pname;
  tag = "next";
  copyToRoot = [
    dockerTools.caCertificates
  ];
  config = {
    # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
    # are handled as expected
    Entrypoint = [
      "${lib.getExe' tini "tini"}"
      "--"
    ];
    Cmd = [
      "${lib.getExe default}"
    ];
  };
}
|
61 nix/shell.nix Normal file

@@ -0,0 +1,61 @@
# Keep sorted
{ cargo-deb
, default
, engage
, go
, inputs
, jq
, lychee
, mdbook
, mkShell
, olm
, system
, taplo
, toolchain
}:

mkShell {
  env = default.env // {
    # Rust Analyzer needs to be able to find the path to default crate
    # sources, and it can read this environment variable to do so. The
    # `rust-src` component is required in order for this to work.
    RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
  };

  # Development tools
  nativeBuildInputs = [
    # Always use nightly rustfmt because most of its options are unstable
    #
    # This needs to come before `toolchain` in this list, otherwise
    # `$PATH` will have stable rustfmt instead.
    inputs.fenix.packages.${system}.latest.rustfmt

    # rust itself
    toolchain

    # CI tests
    engage

    # format toml files
    taplo

    # Needed for producing Debian packages
    cargo-deb

    # Needed for our script for Complement
    jq

    # Needed for Complement
    go
    olm

    # Needed for our script for Complement
    jq

    # Needed for finding broken markdown links
    lychee

    # Useful for editing the book locally
    mdbook
  ] ++ default.nativeBuildInputs ;
}
|
|
@@ -2,7 +2,6 @@
#
# Other files that need upkeep when this changes:
#
# * `.gitlab-ci.yml`
# * `Cargo.toml`
# * `flake.nix`
#

@@ -10,13 +9,13 @@
# If you're having trouble making the relevant changes, bug a maintainer.

[toolchain]
channel = "1.75.0"
channel = "1.79.0"
components = [
    # For rust-analyzer
    "rust-src",
    # For rust-analyzer
    "rust-src",
]
targets = [
    "x86_64-unknown-linux-gnu",
    "x86_64-unknown-linux-musl",
    "aarch64-unknown-linux-musl",
    "aarch64-unknown-linux-musl",
    "x86_64-unknown-linux-gnu",
    "x86_64-unknown-linux-musl",
]
|
||||
|
|
|
@@ -1,2 +1,2 @@
imports_granularity = "Crate"
unstable_features = true
imports_granularity="Crate"
|
||||
|
|
|
@@ -10,12 +10,12 @@ use tracing::warn;
///
/// Only returns None if there is no url specified in the appservice registration file
#[tracing::instrument(skip(request))]
pub(crate) async fn send_request<T: OutgoingRequest>(
pub(crate) async fn send_request<T>(
    registration: Registration,
    request: T,
) -> Result<Option<T::IncomingResponse>>
where
    T: Debug,
    T: OutgoingRequest + Debug,
{
    let destination = match registration.url {
        Some(url) => url,
|
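
The hunk above only moves the `OutgoingRequest` bound off the type parameter and into the `where` clause next to `Debug`; both spellings constrain `T` identically. A minimal sketch with a stand-in trait:

```rust
use std::fmt::Debug;

// Hypothetical stand-in trait, only to show the two equivalent spellings of
// the bounds used in the hunk above.
trait Outgoing {
    type Response;
}

// Bound written inline on the type parameter...
fn send_inline<T: Outgoing + Debug>(request: T) -> Option<T::Response> {
    let _ = request;
    None
}

// ...and the same bounds moved into a `where` clause, as the diff does.
fn send_where<T>(request: T) -> Option<T::Response>
where
    T: Outgoing + Debug,
{
    let _ = request;
    None
}

fn main() {}
```

The `where` form tends to stay readable as the bound list grows, which appears to be the motivation here.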
|
|
@ -75,23 +75,13 @@ pub async fn get_register_available_route(
|
|||
/// - Creates a new account and populates it with default account data
|
||||
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
|
||||
pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
|
||||
if !services().globals.allow_registration()
|
||||
&& !body.from_appservice
|
||||
&& services().globals.config.registration_token.is_none()
|
||||
{
|
||||
if !services().globals.allow_registration().await && body.appservice_info.is_none() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Registration has been disabled.",
|
||||
));
|
||||
}
|
||||
|
||||
if body.body.login_type == Some(LoginType::ApplicationService) && !body.from_appservice {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::MissingToken,
|
||||
"Missing appservice token.",
|
||||
));
|
||||
}
|
||||
|
||||
let is_guest = body.kind == RegistrationKind::Guest;
|
||||
|
||||
let user_id = match (&body.username, is_guest) {
|
||||
|
@ -129,22 +119,56 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
|||
},
|
||||
};
|
||||
|
||||
if body.body.login_type == Some(LoginType::ApplicationService) {
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.is_user_match(&user_id) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::MissingToken,
|
||||
"Missing appservice token.",
|
||||
));
|
||||
}
|
||||
} else if services().appservice.is_exclusive_user_id(&user_id).await {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User id reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
// UIAA
|
||||
let mut uiaainfo = UiaaInfo {
|
||||
flows: vec![AuthFlow {
|
||||
stages: if services().globals.config.registration_token.is_some() {
|
||||
vec![AuthType::RegistrationToken]
|
||||
} else {
|
||||
vec![AuthType::Dummy]
|
||||
},
|
||||
}],
|
||||
completed: Vec::new(),
|
||||
params: Default::default(),
|
||||
session: None,
|
||||
auth_error: None,
|
||||
let mut uiaainfo;
|
||||
let skip_auth = if services().globals.config.registration_token.is_some() {
|
||||
// Registration token required
|
||||
uiaainfo = UiaaInfo {
|
||||
flows: vec![AuthFlow {
|
||||
stages: vec![AuthType::RegistrationToken],
|
||||
}],
|
||||
completed: Vec::new(),
|
||||
params: Default::default(),
|
||||
session: None,
|
||||
auth_error: None,
|
||||
};
|
||||
body.appservice_info.is_some()
|
||||
} else {
|
||||
// No registration token necessary, but clients must still go through the flow
|
||||
uiaainfo = UiaaInfo {
|
||||
flows: vec![AuthFlow {
|
||||
stages: vec![AuthType::Dummy],
|
||||
}],
|
||||
completed: Vec::new(),
|
||||
params: Default::default(),
|
||||
session: None,
|
||||
auth_error: None,
|
||||
};
|
||||
body.appservice_info.is_some() || is_guest
|
||||
};
|
||||
|
||||
if !body.from_appservice && !is_guest {
|
||||
if !skip_auth {
|
||||
if let Some(auth) = &body.auth {
|
||||
let (worked, uiaainfo) = services().uiaa.try_auth(
|
||||
&UserId::parse_with_server_name("", services().globals.server_name())
|
||||
|
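
The rewritten registration flow chooses the UIAA stage from the server configuration and derives `skip_auth` in the same branch, so appservice requests (and guests, when no registration token is configured) bypass the interactive auth step. A much-simplified sketch of that selection, using stand-in types rather than the real `UiaaInfo`/`AuthType`:

```rust
// Simplified stand-ins for the real UiaaInfo/AuthType types, only to show the
// shape of the selection done in the hunk above.
#[derive(Debug)]
enum Stage {
    RegistrationToken,
    Dummy,
}

#[derive(Debug)]
struct Flow {
    stages: Vec<Stage>,
}

fn choose_flow(token_configured: bool, from_appservice: bool, is_guest: bool) -> (Flow, bool) {
    if token_configured {
        // Registration token required; only appservices skip UIAA.
        (Flow { stages: vec![Stage::RegistrationToken] }, from_appservice)
    } else {
        // No token necessary, but ordinary clients still complete the dummy
        // stage; appservices and guests skip it.
        (Flow { stages: vec![Stage::Dummy] }, from_appservice || is_guest)
    }
}

fn main() {
    let (flow, skip_auth) = choose_flow(false, false, true);
    println!("{flow:?}, skip_auth = {skip_auth}");
}
```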
@ -237,7 +261,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
|||
)?;
|
||||
|
||||
info!("New user {} registered on this server.", user_id);
|
||||
if !body.from_appservice && !is_guest {
|
||||
if body.appservice_info.is_none() && !is_guest {
|
||||
services()
|
||||
.admin
|
||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||
|
@ -291,7 +315,11 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
|||
pub async fn change_password_route(
|
||||
body: Ruma<change_password::v3::Request>,
|
||||
) -> Result<change_password::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
let sender_user = body
|
||||
.sender_user
|
||||
.as_ref()
|
||||
// In the future password changes could be performed with UIA with 3PIDs, but we don't support that currently
|
||||
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
|
||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||
|
||||
let mut uiaainfo = UiaaInfo {
|
||||
|
@ -361,7 +389,7 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
|
|||
Ok(whoami::v3::Response {
|
||||
user_id: sender_user.clone(),
|
||||
device_id,
|
||||
is_guest: services().users.is_deactivated(sender_user)? && !body.from_appservice,
|
||||
is_guest: services().users.is_deactivated(sender_user)? && body.appservice_info.is_none(),
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -378,7 +406,11 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
|
|||
pub async fn deactivate_route(
|
||||
body: Ruma<deactivate::v3::Request>,
|
||||
) -> Result<deactivate::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
let sender_user = body
|
||||
.sender_user
|
||||
.as_ref()
|
||||
// In the future password changes could be performed with UIA with SSO, but we don't support that currently
|
||||
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
|
||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||
|
||||
let mut uiaainfo = UiaaInfo {
|
||||
|
@ -451,7 +483,7 @@ pub async fn request_3pid_management_token_via_email_route(
|
|||
) -> Result<request_3pid_management_token_via_email::v3::Response> {
|
||||
Err(Error::BadRequest(
|
||||
ErrorKind::ThreepidDenied,
|
||||
"Third party identifier is not allowed",
|
||||
"Third party identifiers are currently unsupported by this server implementation",
|
||||
))
|
||||
}
|
||||
|
||||
|
@ -465,6 +497,6 @@ pub async fn request_3pid_management_token_via_msisdn_route(
|
|||
) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
|
||||
Err(Error::BadRequest(
|
||||
ErrorKind::ThreepidDenied,
|
||||
"Third party identifier is not allowed",
|
||||
"Third party identifiers are currently unsupported by this server implementation",
|
||||
))
|
||||
}
|
||||
|
|
|
@ -18,6 +18,8 @@ use ruma::{
|
|||
pub async fn create_alias_route(
|
||||
body: Ruma<create_alias::v3::Request>,
|
||||
) -> Result<create_alias::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
if body.room_alias.server_name() != services().globals.server_name() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
|
@ -25,6 +27,24 @@ pub async fn create_alias_route(
|
|||
));
|
||||
}
|
||||
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.aliases.is_match(body.room_alias.as_str()) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else if services()
|
||||
.appservice
|
||||
.is_exclusive_alias(&body.room_alias)
|
||||
.await
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
if services()
|
||||
.rooms
|
||||
.alias
|
||||
|
@ -37,7 +57,7 @@ pub async fn create_alias_route(
|
|||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.set_alias(&body.room_alias, &body.room_id)?;
|
||||
.set_alias(&body.room_alias, &body.room_id, sender_user)?;
|
||||
|
||||
Ok(create_alias::v3::Response::new())
|
||||
}
|
||||
|
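
Both alias routes now gate on the appservice namespace: a request made by an appservice must target an alias matching its `aliases` namespace, and requests from ordinary users are rejected when an appservice has exclusively claimed the alias. A rough sketch of that gate is below; `RegexSet` and `is_exclusively_reserved` are stand-ins, not the actual matcher types used by the server.

```rust
use regex::RegexSet;

// Rough sketch of the namespace gate in the hunks above. `RegexSet` stands in
// for the appservice registration's alias namespace; the real matcher type in
// the server differs, and `is_exclusively_reserved` is hypothetical.
fn check_alias(
    alias: &str,
    appservice_namespace: Option<&RegexSet>,
    is_exclusively_reserved: bool,
) -> Result<(), &'static str> {
    match appservice_namespace {
        // An appservice may only touch aliases inside its own namespace.
        Some(namespace) if !namespace.is_match(alias) => Err("Room alias is not in namespace."),
        Some(_) => Ok(()),
        // Ordinary users may not touch aliases an appservice claimed exclusively.
        None if is_exclusively_reserved => Err("Room alias reserved by appservice."),
        None => Ok(()),
    }
}

fn main() {
    let ns = RegexSet::new([r"^#bridge_.*:example\.org$"]).unwrap();
    assert!(check_alias("#bridge_general:example.org", Some(&ns), false).is_ok());
    assert!(check_alias("#general:example.org", Some(&ns), false).is_err());
}
```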
@ -46,11 +66,12 @@ pub async fn create_alias_route(
|
|||
///
|
||||
/// Deletes a room alias from this server.
|
||||
///
|
||||
/// - TODO: additional access control checks
|
||||
/// - TODO: Update canonical alias event
|
||||
pub async fn delete_alias_route(
|
||||
body: Ruma<delete_alias::v3::Request>,
|
||||
) -> Result<delete_alias::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
if body.room_alias.server_name() != services().globals.server_name() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
|
@ -58,7 +79,28 @@ pub async fn delete_alias_route(
|
|||
));
|
||||
}
|
||||
|
||||
services().rooms.alias.remove_alias(&body.room_alias)?;
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.aliases.is_match(body.room_alias.as_str()) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else if services()
|
||||
.appservice
|
||||
.is_exclusive_alias(&body.room_alias)
|
||||
.await
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.remove_alias(&body.room_alias, sender_user)?;
|
||||
|
||||
// TODO: update alt_aliases?
|
||||
|
||||
|
|
|
@@ -54,7 +54,7 @@ pub async fn get_context_route(
        .user_can_see_event(sender_user, &room_id, &body.event_id)?
    {
        return Err(Error::BadRequest(
            ErrorKind::Forbidden,
            ErrorKind::forbidden(),
            "You don't have permission to view this event.",
        ));
    }
|
||||
|
|
|
@@ -53,7 +53,7 @@ pub async fn update_device_route(
        .get_device_metadata(sender_user, &body.device_id)?
        .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;

    device.display_name = body.display_name.clone();
    device.display_name.clone_from(&body.display_name);

    services()
        .users
|
||||
|
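
Replacing `device.display_name = body.display_name.clone()` with `clone_from` is the form clippy's `assigning_clones` lint suggests: `Clone::clone_from` clones into the existing value, which lets types such as `String` reuse their allocation instead of building a fresh clone and dropping the old one. A tiny illustration:

```rust
fn main() {
    let source = Some(String::from("My phone"));

    // clone-and-assign: builds a brand new Option<String>, then drops whatever
    // `display_name` held before.
    let mut display_name = Some(String::from("old name"));
    println!("before:     {display_name:?}");
    display_name = source.clone();
    println!("assigned:   {display_name:?}");

    // clone_from: clones into the existing value, letting types like String
    // reuse their allocation where possible. This is what the hunk above adopts.
    let mut display_name2 = Some(String::from("old name"));
    display_name2.clone_from(&source);
    println!("clone_from: {display_name2:?}");
}
```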
|
|
@ -1,12 +1,24 @@
|
|||
// Unauthenticated media is deprecated
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
|
||||
use ruma::api::client::{
|
||||
error::ErrorKind,
|
||||
media::{
|
||||
create_content, get_content, get_content_as_filename, get_content_thumbnail,
|
||||
get_media_config,
|
||||
use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE};
|
||||
use ruma::{
|
||||
api::{
|
||||
client::{
|
||||
authenticated_media::{
|
||||
get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
|
||||
},
|
||||
error::ErrorKind,
|
||||
media::{self, create_content},
|
||||
},
|
||||
federation::authenticated_media::{self as federation_media, FileOrLocation},
|
||||
},
|
||||
http_headers::{ContentDisposition, ContentDispositionType},
|
||||
media::Method,
|
||||
ServerName, UInt,
|
||||
};
|
||||
|
||||
const MXC_LENGTH: usize = 32;
|
||||
|
@ -15,9 +27,20 @@ const MXC_LENGTH: usize = 32;
|
|||
///
|
||||
/// Returns max upload size.
|
||||
pub async fn get_media_config_route(
|
||||
_body: Ruma<get_media_config::v3::Request>,
|
||||
) -> Result<get_media_config::v3::Response> {
|
||||
Ok(get_media_config::v3::Response {
|
||||
_body: Ruma<media::get_media_config::v3::Request>,
|
||||
) -> Result<media::get_media_config::v3::Response> {
|
||||
Ok(media::get_media_config::v3::Response {
|
||||
upload_size: services().globals.max_request_size().into(),
|
||||
})
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/config`
|
||||
///
|
||||
/// Returns max upload size.
|
||||
pub async fn get_media_config_auth_route(
|
||||
_body: Ruma<get_media_config::v1::Request>,
|
||||
) -> Result<get_media_config::v1::Response> {
|
||||
Ok(get_media_config::v1::Response {
|
||||
upload_size: services().globals.max_request_size().into(),
|
||||
})
|
||||
}
|
||||
|
@ -41,10 +64,10 @@ pub async fn create_content_route(
|
|||
.media
|
||||
.create(
|
||||
mxc.clone(),
|
||||
body.filename
|
||||
.as_ref()
|
||||
.map(|filename| "inline; filename=".to_owned() + filename)
|
||||
.as_deref(),
|
||||
Some(
|
||||
ContentDisposition::new(ContentDispositionType::Inline)
|
||||
.with_filename(body.filename.clone()),
|
||||
),
|
||||
body.content_type.as_deref(),
|
||||
&body.file,
|
||||
)
|
||||
|
@ -58,28 +81,67 @@ pub async fn create_content_route(
|
|||
|
||||
pub async fn get_remote_content(
|
||||
mxc: &str,
|
||||
server_name: &ruma::ServerName,
|
||||
server_name: &ServerName,
|
||||
media_id: String,
|
||||
) -> Result<get_content::v3::Response, Error> {
|
||||
let content_response = services()
|
||||
) -> Result<get_content::v1::Response, Error> {
|
||||
let content_response = match services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
server_name,
|
||||
get_content::v3::Request {
|
||||
allow_remote: false,
|
||||
server_name: server_name.to_owned(),
|
||||
media_id,
|
||||
federation_media::get_content::v1::Request {
|
||||
media_id: media_id.clone(),
|
||||
timeout_ms: Duration::from_secs(20),
|
||||
allow_redirect: false,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
.await
|
||||
{
|
||||
Ok(federation_media::get_content::v1::Response {
|
||||
metadata: _,
|
||||
content: FileOrLocation::File(content),
|
||||
}) => get_content::v1::Response {
|
||||
file: content.file,
|
||||
content_type: content.content_type,
|
||||
content_disposition: content.content_disposition,
|
||||
},
|
||||
|
||||
Ok(federation_media::get_content::v1::Response {
|
||||
metadata: _,
|
||||
content: FileOrLocation::Location(url),
|
||||
}) => get_location_content(url).await?,
|
||||
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
|
||||
let media::get_content::v3::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
..
|
||||
} = services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
server_name,
|
||||
media::get_content::v3::Request {
|
||||
server_name: server_name.to_owned(),
|
||||
media_id,
|
||||
timeout_ms: Duration::from_secs(20),
|
||||
allow_remote: false,
|
||||
allow_redirect: true,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
get_content::v1::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
}
|
||||
}
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
|
||||
services()
|
||||
.media
|
||||
.create(
|
||||
mxc.to_owned(),
|
||||
content_response.content_disposition.as_deref(),
|
||||
content_response.content_disposition.clone(),
|
||||
content_response.content_type.as_deref(),
|
||||
&content_response.file,
|
||||
)
|
||||
|
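
`get_remote_content` now asks the remote server via the authenticated federation media endpoint first and only falls back to the deprecated unauthenticated `v3` endpoint when the response is `ErrorKind::Unrecognized`, i.e. the remote does not know the new route; `FileOrLocation::Location` responses are resolved by fetching the URL. The shape of that fallback, reduced to hypothetical stand-in functions and error types:

```rust
// Hypothetical, stripped-down shape of the fallback in the hunk above. The
// request functions and error type here are stand-ins, not the real
// service/ruma APIs.
#[derive(Debug)]
enum FetchError {
    Unrecognized,
    Other(String),
}

#[derive(Debug)]
struct Media {
    bytes: Vec<u8>,
}

fn fetch_authenticated(media_id: &str) -> Result<Media, FetchError> {
    // Pretend the remote server predates authenticated media.
    let _ = media_id;
    Err(FetchError::Unrecognized)
}

fn fetch_deprecated_v3(media_id: &str) -> Result<Media, FetchError> {
    Ok(Media { bytes: format!("legacy bytes for {media_id}").into_bytes() })
}

fn fetch_with_fallback(media_id: &str) -> Result<Media, FetchError> {
    match fetch_authenticated(media_id) {
        Ok(media) => Ok(media),
        // Only the "route not recognized" error triggers the legacy fallback;
        // anything else is surfaced to the caller, as in the diff.
        Err(FetchError::Unrecognized) => fetch_deprecated_v3(media_id),
        Err(other) => Err(other),
    }
}

fn main() {
    let media = fetch_with_fallback("abc123").unwrap();
    println!("fetched {} bytes", media.bytes.len());
}
```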
@ -94,26 +156,58 @@ pub async fn get_remote_content(
|
|||
///
|
||||
/// - Only allows federation if `allow_remote` is true
|
||||
pub async fn get_content_route(
|
||||
body: Ruma<get_content::v3::Request>,
|
||||
) -> Result<get_content::v3::Response> {
|
||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||
body: Ruma<media::get_content::v3::Request>,
|
||||
) -> Result<media::get_content::v3::Response> {
|
||||
let get_content::v1::Response {
|
||||
file,
|
||||
content_disposition,
|
||||
content_type,
|
||||
} = get_content(&body.server_name, body.media_id.clone(), body.allow_remote).await?;
|
||||
|
||||
if let Some(FileMeta {
|
||||
Ok(media::get_content::v3::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
})
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
|
||||
///
|
||||
/// Load media from our server or over federation.
|
||||
pub async fn get_content_auth_route(
|
||||
body: Ruma<get_content::v1::Request>,
|
||||
) -> Result<get_content::v1::Response> {
|
||||
get_content(&body.server_name, body.media_id.clone(), true).await
|
||||
}
|
||||
|
||||
async fn get_content(
|
||||
server_name: &ServerName,
|
||||
media_id: String,
|
||||
allow_remote: bool,
|
||||
) -> Result<get_content::v1::Response, Error> {
|
||||
let mxc = format!("mxc://{}/{}", server_name, media_id);
|
||||
|
||||
if let Ok(Some(FileMeta {
|
||||
content_disposition,
|
||||
content_type,
|
||||
file,
|
||||
}) = services().media.get(mxc.clone()).await?
|
||||
})) = services().media.get(mxc.clone()).await
|
||||
{
|
||||
Ok(get_content::v3::Response {
|
||||
Ok(get_content::v1::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
content_disposition: Some(content_disposition),
|
||||
})
|
||||
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||
} else if server_name != services().globals.server_name() && allow_remote {
|
||||
let remote_content_response =
|
||||
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
|
||||
Ok(remote_content_response)
|
||||
get_remote_content(&mxc, server_name, media_id.clone()).await?;
|
||||
|
||||
Ok(get_content::v1::Response {
|
||||
content_disposition: remote_content_response.content_disposition,
|
||||
content_type: remote_content_response.content_type,
|
||||
file: remote_content_response.file,
|
||||
})
|
||||
} else {
|
||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||
}
|
||||
|
@ -125,31 +219,74 @@ pub async fn get_content_route(
|
|||
///
|
||||
/// - Only allows federation if `allow_remote` is true
|
||||
pub async fn get_content_as_filename_route(
|
||||
body: Ruma<get_content_as_filename::v3::Request>,
|
||||
) -> Result<get_content_as_filename::v3::Response> {
|
||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||
|
||||
if let Some(FileMeta {
|
||||
content_disposition: _,
|
||||
content_type,
|
||||
body: Ruma<media::get_content_as_filename::v3::Request>,
|
||||
) -> Result<media::get_content_as_filename::v3::Response> {
|
||||
let get_content_as_filename::v1::Response {
|
||||
file,
|
||||
}) = services().media.get(mxc.clone()).await?
|
||||
content_type,
|
||||
content_disposition,
|
||||
} = get_content_as_filename(
|
||||
&body.server_name,
|
||||
body.media_id.clone(),
|
||||
body.filename.clone(),
|
||||
body.allow_remote,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(media::get_content_as_filename::v3::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
})
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
|
||||
///
|
||||
/// Load media from our server or over federation, permitting desired filename.
|
||||
pub async fn get_content_as_filename_auth_route(
|
||||
body: Ruma<get_content_as_filename::v1::Request>,
|
||||
) -> Result<get_content_as_filename::v1::Response, Error> {
|
||||
get_content_as_filename(
|
||||
&body.server_name,
|
||||
body.media_id.clone(),
|
||||
body.filename.clone(),
|
||||
true,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_content_as_filename(
|
||||
server_name: &ServerName,
|
||||
media_id: String,
|
||||
filename: String,
|
||||
allow_remote: bool,
|
||||
) -> Result<get_content_as_filename::v1::Response, Error> {
|
||||
let mxc = format!("mxc://{}/{}", server_name, media_id);
|
||||
|
||||
if let Ok(Some(FileMeta {
|
||||
file, content_type, ..
|
||||
})) = services().media.get(mxc.clone()).await
|
||||
{
|
||||
Ok(get_content_as_filename::v3::Response {
|
||||
Ok(get_content_as_filename::v1::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition: Some(format!("inline; filename={}", body.filename)),
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
content_disposition: Some(
|
||||
ContentDisposition::new(ContentDispositionType::Inline)
|
||||
.with_filename(Some(filename.clone())),
|
||||
),
|
||||
})
|
||||
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||
} else if server_name != services().globals.server_name() && allow_remote {
|
||||
let remote_content_response =
|
||||
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
|
||||
get_remote_content(&mxc, server_name, media_id.clone()).await?;
|
||||
|
||||
Ok(get_content_as_filename::v3::Response {
|
||||
content_disposition: Some(format!("inline: filename={}", body.filename)),
|
||||
Ok(get_content_as_filename::v1::Response {
|
||||
content_disposition: Some(
|
||||
ContentDisposition::new(ContentDispositionType::Inline)
|
||||
.with_filename(Some(filename.clone())),
|
||||
),
|
||||
content_type: remote_content_response.content_type,
|
||||
file: remote_content_response.file,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
})
|
||||
} else {
|
||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||
|
@ -162,62 +299,169 @@ pub async fn get_content_as_filename_route(
|
|||
///
|
||||
/// - Only allows federation if `allow_remote` is true
|
||||
pub async fn get_content_thumbnail_route(
|
||||
body: Ruma<get_content_thumbnail::v3::Request>,
|
||||
) -> Result<get_content_thumbnail::v3::Response> {
|
||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||
body: Ruma<media::get_content_thumbnail::v3::Request>,
|
||||
) -> Result<media::get_content_thumbnail::v3::Response> {
|
||||
let get_content_thumbnail::v1::Response { file, content_type } = get_content_thumbnail(
|
||||
&body.server_name,
|
||||
body.media_id.clone(),
|
||||
body.height,
|
||||
body.width,
|
||||
body.method.clone(),
|
||||
body.animated,
|
||||
body.allow_remote,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if let Some(FileMeta {
|
||||
content_type, file, ..
|
||||
}) = services()
|
||||
Ok(media::get_content_thumbnail::v3::Response {
|
||||
file,
|
||||
content_type,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
})
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
|
||||
///
|
||||
/// Load media thumbnail from our server or over federation.
|
||||
pub async fn get_content_thumbnail_auth_route(
|
||||
body: Ruma<get_content_thumbnail::v1::Request>,
|
||||
) -> Result<get_content_thumbnail::v1::Response> {
|
||||
get_content_thumbnail(
|
||||
&body.server_name,
|
||||
body.media_id.clone(),
|
||||
body.height,
|
||||
body.width,
|
||||
body.method.clone(),
|
||||
body.animated,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_content_thumbnail(
|
||||
server_name: &ServerName,
|
||||
media_id: String,
|
||||
height: UInt,
|
||||
width: UInt,
|
||||
method: Option<Method>,
|
||||
animated: Option<bool>,
|
||||
allow_remote: bool,
|
||||
) -> Result<get_content_thumbnail::v1::Response, Error> {
|
||||
let mxc = format!("mxc://{}/{}", server_name, media_id);
|
||||
|
||||
if let Ok(Some(FileMeta {
|
||||
file, content_type, ..
|
||||
})) = services()
|
||||
.media
|
||||
.get_thumbnail(
|
||||
mxc.clone(),
|
||||
body.width
|
||||
width
|
||||
.try_into()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
||||
body.height
|
||||
height
|
||||
.try_into()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?,
|
||||
)
|
||||
.await?
|
||||
.await
|
||||
{
|
||||
Ok(get_content_thumbnail::v3::Response {
|
||||
file,
|
||||
content_type,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
})
|
||||
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||
let get_thumbnail_response = services()
|
||||
Ok(get_content_thumbnail::v1::Response { file, content_type })
|
||||
} else if server_name != services().globals.server_name() && allow_remote {
|
||||
let thumbnail_response = match services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
&body.server_name,
|
||||
get_content_thumbnail::v3::Request {
|
||||
allow_remote: false,
|
||||
height: body.height,
|
||||
width: body.width,
|
||||
method: body.method.clone(),
|
||||
server_name: body.server_name.clone(),
|
||||
media_id: body.media_id.clone(),
|
||||
server_name,
|
||||
federation_media::get_content_thumbnail::v1::Request {
|
||||
height,
|
||||
width,
|
||||
method: method.clone(),
|
||||
media_id: media_id.clone(),
|
||||
timeout_ms: Duration::from_secs(20),
|
||||
allow_redirect: false,
|
||||
animated,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
.await
|
||||
{
|
||||
Ok(federation_media::get_content_thumbnail::v1::Response {
|
||||
metadata: _,
|
||||
content: FileOrLocation::File(content),
|
||||
}) => get_content_thumbnail::v1::Response {
|
||||
file: content.file,
|
||||
content_type: content.content_type,
|
||||
},
|
||||
|
||||
Ok(federation_media::get_content_thumbnail::v1::Response {
|
||||
metadata: _,
|
||||
content: FileOrLocation::Location(url),
|
||||
}) => {
|
||||
let get_content::v1::Response {
|
||||
file, content_type, ..
|
||||
} = get_location_content(url).await?;
|
||||
|
||||
get_content_thumbnail::v1::Response { file, content_type }
|
||||
}
|
||||
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
|
||||
let media::get_content_thumbnail::v3::Response {
|
||||
file, content_type, ..
|
||||
} = services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
server_name,
|
||||
media::get_content_thumbnail::v3::Request {
|
||||
height,
|
||||
width,
|
||||
method: method.clone(),
|
||||
server_name: server_name.to_owned(),
|
||||
media_id: media_id.clone(),
|
||||
timeout_ms: Duration::from_secs(20),
|
||||
allow_redirect: false,
|
||||
animated,
|
||||
allow_remote: false,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
get_content_thumbnail::v1::Response { file, content_type }
|
||||
}
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
|
||||
services()
|
||||
.media
|
||||
.upload_thumbnail(
|
||||
mxc,
|
||||
None,
|
||||
get_thumbnail_response.content_type.as_deref(),
|
||||
body.width.try_into().expect("all UInts are valid u32s"),
|
||||
body.height.try_into().expect("all UInts are valid u32s"),
|
||||
&get_thumbnail_response.file,
|
||||
thumbnail_response.content_type.as_deref(),
|
||||
width.try_into().expect("all UInts are valid u32s"),
|
||||
height.try_into().expect("all UInts are valid u32s"),
|
||||
&thumbnail_response.file,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(get_thumbnail_response)
|
||||
Ok(thumbnail_response)
|
||||
} else {
|
||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_location_content(url: String) -> Result<get_content::v1::Response, Error> {
|
||||
let client = services().globals.default_client();
|
||||
let response = client.get(url).send().await?;
|
||||
let headers = response.headers();
|
||||
|
||||
let content_type = headers
|
||||
.get(CONTENT_TYPE)
|
||||
.and_then(|header| header.to_str().ok())
|
||||
.map(ToOwned::to_owned);
|
||||
|
||||
let content_disposition = headers
|
||||
.get(CONTENT_DISPOSITION)
|
||||
.map(|header| header.as_bytes())
|
||||
.map(TryFrom::try_from)
|
||||
.and_then(Result::ok);
|
||||
|
||||
let file = response.bytes().await?.to_vec();
|
||||
|
||||
Ok(get_content::v1::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -15,13 +15,11 @@ use ruma::{
|
|||
room::{
|
||||
join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent},
|
||||
member::{MembershipState, RoomMemberEventContent},
|
||||
power_levels::RoomPowerLevelsEventContent,
|
||||
},
|
||||
StateEventType, TimelineEventType,
|
||||
},
|
||||
serde::Base64,
|
||||
state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
|
||||
OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId,
|
||||
state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
|
||||
OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId,
|
||||
};
|
||||
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
|
||||
use std::{
|
||||
|
@ -33,7 +31,10 @@ use tokio::sync::RwLock;
|
|||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use crate::{
|
||||
service::pdu::{gen_event_id_canonical_json, PduBuilder},
|
||||
service::{
|
||||
globals::SigningKeys,
|
||||
pdu::{gen_event_id_canonical_json, PduBuilder},
|
||||
},
|
||||
services, utils, Error, PduEvent, Result, Ruma,
|
||||
};
|
||||
|
||||
|
@ -55,21 +56,14 @@ pub async fn join_room_by_id_route(
|
|||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(&body.room_id)?
|
||||
.unwrap_or(
|
||||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.invite_state(sender_user, &body.room_id)?
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
|
||||
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
|
||||
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
|
||||
.filter_map(|sender| UserId::parse(sender).ok())
|
||||
.map(|user| user.server_name().to_owned())
|
||||
.collect(),
|
||||
),
|
||||
.invite_state(sender_user, &body.room_id)?
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
|
||||
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
|
||||
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
|
||||
.filter_map(|sender| UserId::parse(sender).ok())
|
||||
.map(|user| user.server_name().to_owned()),
|
||||
);
|
||||
|
||||
servers.push(
|
||||
|
@ -103,26 +97,19 @@ pub async fn join_room_by_id_or_alias_route(
|
|||
|
||||
let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) {
|
||||
Ok(room_id) => {
|
||||
let mut servers = body.server_name.clone();
|
||||
let mut servers = body.via.clone();
|
||||
servers.extend(
|
||||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(&room_id)?
|
||||
.unwrap_or(
|
||||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.invite_state(sender_user, &room_id)?
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
|
||||
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
|
||||
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
|
||||
.filter_map(|sender| UserId::parse(sender).ok())
|
||||
.map(|user| user.server_name().to_owned())
|
||||
.collect(),
|
||||
),
|
||||
.invite_state(sender_user, &room_id)?
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
|
||||
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
|
||||
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
|
||||
.filter_map(|sender| UserId::parse(sender).ok())
|
||||
.map(|user| user.server_name().to_owned()),
|
||||
);
|
||||
|
||||
servers.push(
|
||||
|
@ -201,7 +188,7 @@ pub async fn kick_user_route(
|
|||
) -> Result<kick_user::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let mut event: RoomMemberEventContent = serde_json::from_str(
|
||||
let event: RoomMemberEventContent = serde_json::from_str(
|
||||
services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
|
@ -212,15 +199,26 @@ pub async fn kick_user_route(
|
|||
)?
|
||||
.ok_or(Error::BadRequest(
|
||||
ErrorKind::BadState,
|
||||
"Cannot kick member that's not in the room.",
|
||||
"Cannot kick a user who is not in the room.",
|
||||
))?
|
||||
.content
|
||||
.get(),
|
||||
)
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
|
||||
|
||||
event.membership = MembershipState::Leave;
|
||||
event.reason = body.reason.clone();
|
||||
// If they are already kicked and the reason is unchanged, there isn't any point in sending a new event.
|
||||
if event.membership == MembershipState::Leave && event.reason == body.reason {
|
||||
return Ok(kick_user::v3::Response {});
|
||||
}
|
||||
|
||||
let event = RoomMemberEventContent {
|
||||
is_direct: None,
|
||||
membership: MembershipState::Leave,
|
||||
third_party_invite: None,
|
||||
reason: body.reason.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
..event
|
||||
};
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
|
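
Instead of mutating the deserialized member event, the kick path now returns early when nothing would change and otherwise builds the new content with functional record update syntax, overriding the fields that must change and resetting ones like `is_direct` and `third_party_invite` while keeping the rest. A small example of that syntax with a hypothetical struct:

```rust
// Minimal illustration of the functional record update (`..base`) used in the
// hunk above; `MemberContent` is a hypothetical stand-in for the real event
// content type.
#[derive(Debug, Clone)]
struct MemberContent {
    membership: String,
    reason: Option<String>,
    is_direct: Option<bool>,
    displayname: Option<String>,
}

fn main() {
    let current = MemberContent {
        membership: "join".to_owned(),
        reason: None,
        is_direct: Some(true),
        displayname: Some("Alice".to_owned()),
    };

    // Override the fields that must change, reset the ones that must not be
    // carried over, and keep everything else (here: displayname) from `current`.
    let kicked = MemberContent {
        membership: "leave".to_owned(),
        reason: Some("spam".to_owned()),
        is_direct: None,
        ..current
    };

    println!("{kicked:?}");
}
```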
@ -243,6 +241,7 @@ pub async fn kick_user_route(
|
|||
unsigned: None,
|
||||
state_key: Some(body.user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
|
@ -261,7 +260,7 @@ pub async fn kick_user_route(
|
|||
pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_user::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let event = services()
|
||||
let event = if let Some(event) = services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get(
|
||||
|
@ -269,27 +268,30 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
|
|||
&StateEventType::RoomMember,
|
||||
body.user_id.as_ref(),
|
||||
)?
|
||||
.map_or(
|
||||
Ok(RoomMemberEventContent {
|
||||
membership: MembershipState::Ban,
|
||||
displayname: services().users.displayname(&body.user_id)?,
|
||||
avatar_url: services().users.avatar_url(&body.user_id)?,
|
||||
is_direct: None,
|
||||
third_party_invite: None,
|
||||
blurhash: services().users.blurhash(&body.user_id)?,
|
||||
reason: body.reason.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
}),
|
||||
|event| {
|
||||
serde_json::from_str(event.content.get())
|
||||
.map(|event: RoomMemberEventContent| RoomMemberEventContent {
|
||||
membership: MembershipState::Ban,
|
||||
join_authorized_via_users_server: None,
|
||||
..event
|
||||
})
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))
|
||||
},
|
||||
)?;
|
||||
// Even when the previous member content is invalid, we should let the ban go through anyways.
|
||||
.and_then(|event| serde_json::from_str::<RoomMemberEventContent>(event.content.get()).ok())
|
||||
{
|
||||
// If they are already banned and the reason is unchanged, there isn't any point in sending a new event.
|
||||
if event.membership == MembershipState::Ban && event.reason == body.reason {
|
||||
return Ok(ban_user::v3::Response {});
|
||||
}
|
||||
|
||||
RoomMemberEventContent {
|
||||
membership: MembershipState::Ban,
|
||||
join_authorized_via_users_server: None,
|
||||
reason: body.reason.clone(),
|
||||
third_party_invite: None,
|
||||
is_direct: None,
|
||||
avatar_url: event.avatar_url,
|
||||
displayname: event.displayname,
|
||||
blurhash: event.blurhash,
|
||||
}
|
||||
} else {
|
||||
RoomMemberEventContent {
|
||||
reason: body.reason.clone(),
|
||||
..RoomMemberEventContent::new(MembershipState::Ban)
|
||||
}
|
||||
};
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
|
@ -312,6 +314,7 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
|
|||
unsigned: None,
|
||||
state_key: Some(body.user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
|
@ -332,7 +335,7 @@ pub async fn unban_user_route(
|
|||
) -> Result<unban_user::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let mut event: RoomMemberEventContent = serde_json::from_str(
|
||||
let event: RoomMemberEventContent = serde_json::from_str(
|
||||
services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
|
@ -350,8 +353,19 @@ pub async fn unban_user_route(
|
|||
)
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
|
||||
|
||||
event.membership = MembershipState::Leave;
|
||||
event.reason = body.reason.clone();
|
||||
// If they are already unbanned and the reason is unchanged, there isn't any point in sending a new event.
|
||||
if event.membership == MembershipState::Leave && event.reason == body.reason {
|
||||
return Ok(unban_user::v3::Response {});
|
||||
}
|
||||
|
||||
let event = RoomMemberEventContent {
|
||||
is_direct: None,
|
||||
membership: MembershipState::Leave,
|
||||
third_party_invite: None,
|
||||
reason: body.reason.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
..event
|
||||
};
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
|
@ -374,6 +388,7 @@ pub async fn unban_user_route(
|
|||
unsigned: None,
|
||||
state_key: Some(body.user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
|
@ -441,7 +456,7 @@ pub async fn get_member_events_route(
|
|||
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this room.",
|
||||
));
|
||||
}
|
||||
|
@ -476,7 +491,7 @@ pub async fn joined_members_route(
|
|||
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this room.",
|
||||
));
|
||||
}
|
||||
|
@ -512,6 +527,12 @@ async fn join_room_by_id_helper(
|
|||
) -> Result<join_room_by_id::v3::Response> {
|
||||
let sender_user = sender_user.expect("user is authenticated");
|
||||
|
||||
if let Ok(true) = services().rooms.state_cache.is_joined(sender_user, room_id) {
|
||||
return Ok(join_room_by_id::v3::Response {
|
||||
room_id: room_id.into(),
|
||||
});
|
||||
}
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
.globals
|
||||
|
@ -606,7 +627,7 @@ async fn join_room_by_id_helper(
|
|||
let event_id = format!(
|
||||
"${}",
|
||||
ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
|
||||
.expect("ruma can calculate reference hashes")
|
||||
.expect("Event format validated when event was hashed")
|
||||
);
|
||||
let event_id = <&EventId>::try_from(event_id.as_str())
|
||||
.expect("ruma's reference hashes are valid event ids");
|
||||
|
@ -838,11 +859,6 @@ async fn join_room_by_id_helper(
|
|||
&StateEventType::RoomJoinRules,
|
||||
"",
|
||||
)?;
|
||||
let power_levels_event = services().rooms.state_accessor.room_state_get(
|
||||
room_id,
|
||||
&StateEventType::RoomPowerLevels,
|
||||
"",
|
||||
)?;
|
||||
|
||||
let join_rules_event_content: Option<RoomJoinRulesEventContent> = join_rules_event
|
||||
.as_ref()
|
||||
|
@ -853,15 +869,6 @@ async fn join_room_by_id_helper(
|
|||
})
|
||||
})
|
||||
.transpose()?;
|
||||
let power_levels_event_content: Option<RoomPowerLevelsEventContent> = power_levels_event
|
||||
.as_ref()
|
||||
.map(|power_levels_event| {
|
||||
serde_json::from_str(power_levels_event.content.get()).map_err(|e| {
|
||||
warn!("Invalid power levels event: {}", e);
|
||||
Error::bad_database("Invalid power levels event in db.")
|
||||
})
|
||||
})
|
||||
.transpose()?;
|
||||
|
||||
let restriction_rooms = match join_rules_event_content {
|
||||
Some(RoomJoinRulesEventContent {
|
||||
|
@ -880,47 +887,37 @@ async fn join_room_by_id_helper(
|
|||
_ => Vec::new(),
|
||||
};
|
||||
|
||||
let authorized_user = restriction_rooms
|
||||
.iter()
|
||||
.find_map(|restriction_room_id| {
|
||||
if !services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(sender_user, restriction_room_id)
|
||||
.ok()?
|
||||
let authorized_user = if restriction_rooms.iter().any(|restriction_room_id| {
|
||||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(sender_user, restriction_room_id)
|
||||
.unwrap_or(false)
|
||||
}) {
|
||||
let mut auth_user = None;
|
||||
for user in services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(room_id)
|
||||
.filter_map(Result::ok)
|
||||
.collect::<Vec<_>>()
|
||||
{
|
||||
if user.server_name() == services().globals.server_name()
|
||||
&& services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.user_can_invite(room_id, &user, sender_user, &state_lock)
|
||||
.await
|
||||
.unwrap_or(false)
|
||||
{
|
||||
return None;
|
||||
auth_user = Some(user);
|
||||
break;
|
||||
}
|
||||
let authorized_user = power_levels_event_content
|
||||
.as_ref()
|
||||
.and_then(|c| {
|
||||
c.users
|
||||
.iter()
|
||||
.filter(|(uid, i)| {
|
||||
uid.server_name() == services().globals.server_name()
|
||||
&& **i > ruma::int!(0)
|
||||
&& services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(uid, restriction_room_id)
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.max_by_key(|(_, i)| *i)
|
||||
.map(|(u, _)| u.to_owned())
|
||||
})
|
||||
.or_else(|| {
|
||||
// TODO: Check here if user is actually allowed to invite. Currently the auth
|
||||
// check will just fail in this case.
|
||||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(restriction_room_id)
|
||||
.filter_map(|r| r.ok())
|
||||
.find(|uid| uid.server_name() == services().globals.server_name())
|
||||
});
|
||||
Some(authorized_user)
|
||||
})
|
||||
.flatten();
|
||||
}
|
||||
auth_user
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let event = RoomMemberEventContent {
|
||||
membership: MembershipState::Join,
|
||||
|
@ -944,6 +941,7 @@ async fn join_room_by_id_helper(
|
|||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
room_id,
|
||||
|
@ -958,9 +956,7 @@ async fn join_room_by_id_helper(
|
|||
if !restriction_rooms.is_empty()
|
||||
&& servers
|
||||
.iter()
|
||||
.filter(|s| *s != services().globals.server_name())
|
||||
.count()
|
||||
> 0
|
||||
.any(|s| *s != services().globals.server_name())
|
||||
{
|
||||
info!(
|
||||
"We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements"
|
||||
|
@ -991,6 +987,8 @@ async fn join_room_by_id_helper(
|
|||
.as_str()
|
||||
})
|
||||
.and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok());
|
||||
let restricted_join = join_authorized_via_users_server.is_some();
|
||||
|
||||
// TODO: Is origin needed?
|
||||
join_event_stub.insert(
|
||||
"origin".to_owned(),
|
||||
|
@ -1037,7 +1035,7 @@ async fn join_room_by_id_helper(
|
|||
ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
|
||||
.expect("ruma can calculate reference hashes")
|
||||
);
|
||||
let event_id = <&EventId>::try_from(event_id.as_str())
|
||||
let event_id = OwnedEventId::try_from(event_id)
|
||||
.expect("ruma's reference hashes are valid event ids");
|
||||
|
||||
// Add event_id back
|
||||
|
@ -1062,43 +1060,32 @@ async fn join_room_by_id_helper(
|
|||
)
|
||||
.await?;
|
||||
|
||||
if let Some(signed_raw) = send_join_response.room_state.event {
|
||||
let (signed_event_id, signed_value) =
|
||||
match gen_event_id_canonical_json(&signed_raw, &room_version_id) {
|
||||
Ok(t) => t,
|
||||
Err(_) => {
|
||||
// Event could not be converted to canonical json
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"Could not convert event to canonical json.",
|
||||
));
|
||||
}
|
||||
};
|
||||
let pdu = if let Some(signed_raw) = send_join_response.room_state.event {
|
||||
let (signed_event_id, signed_pdu) =
|
||||
gen_event_id_canonical_json(&signed_raw, &room_version_id)?;
|
||||
|
||||
if signed_event_id != event_id {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
return Err(Error::BadServerResponse(
|
||||
"Server sent event with wrong event id",
|
||||
));
|
||||
}
|
||||
|
||||
drop(state_lock);
|
||||
let pub_key_map = RwLock::new(BTreeMap::new());
|
||||
services()
|
||||
.rooms
|
||||
.event_handler
|
||||
.handle_incoming_pdu(
|
||||
&remote_server,
|
||||
&signed_event_id,
|
||||
room_id,
|
||||
signed_value,
|
||||
true,
|
||||
&pub_key_map,
|
||||
)
|
||||
.await?;
|
||||
signed_pdu
|
||||
} else if restricted_join {
|
||||
return Err(Error::BadServerResponse(
|
||||
"No signed event was returned, despite just performing a restricted join",
|
||||
));
|
||||
} else {
|
||||
return Err(error);
|
||||
}
|
||||
join_event
|
||||
};
|
||||
|
||||
drop(state_lock);
|
||||
let pub_key_map = RwLock::new(BTreeMap::new());
|
||||
services()
|
||||
.rooms
|
||||
.event_handler
|
||||
.handle_incoming_pdu(&remote_server, &event_id, room_id, pdu, true, &pub_key_map)
|
||||
.await?;
|
||||
} else {
|
||||
return Err(error);
|
||||
}
|
||||
|
@ -1149,7 +1136,7 @@ async fn make_join_request(
|
|||
async fn validate_and_add_event_id(
|
||||
pdu: &RawJsonValue,
|
||||
room_version: &RoomVersionId,
|
||||
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
|
||||
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
|
||||
) -> Result<(OwnedEventId, CanonicalJsonObject)> {
|
||||
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
|
||||
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
|
||||
|
@ -1158,7 +1145,7 @@ async fn validate_and_add_event_id(
|
|||
let event_id = EventId::parse(format!(
|
||||
"${}",
|
||||
ruma::signatures::reference_hash(&value, room_version)
|
||||
.expect("ruma can calculate reference hashes")
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))?
|
||||
))
|
||||
.expect("ruma's reference hashes are valid event ids");
|
||||
|
||||
|
@ -1196,8 +1183,35 @@ async fn validate_and_add_event_id(
|
|||
}
|
||||
}
|
||||
|
||||
if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version)
|
||||
{
|
||||
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
|
||||
error!("Invalid PDU, no origin_server_ts field");
|
||||
Error::BadRequest(
|
||||
ErrorKind::MissingParam,
|
||||
"Invalid PDU, no origin_server_ts field",
|
||||
)
|
||||
})?;
|
||||
|
||||
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
|
||||
let ts = origin_server_ts.as_integer().ok_or_else(|| {
|
||||
Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"origin_server_ts must be an integer",
|
||||
)
|
||||
})?;
|
||||
|
||||
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
|
||||
})?)
|
||||
};
|
||||
|
||||
let unfiltered_keys = (*pub_key_map.read().await).clone();
|
||||
|
||||
let keys =
|
||||
services()
|
||||
.globals
|
||||
.filter_keys_server_map(unfiltered_keys, origin_server_ts, room_version);
|
||||
|
||||
if let Err(e) = ruma::signatures::verify_event(&keys, &value, room_version) {
|
||||
warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
|
||||
back_off(event_id).await;
|
||||
return Err(Error::BadServerResponse("Event failed verification."));
|
||||
|
@ -1250,6 +1264,7 @@ pub(crate) async fn invite_helper<'a>(
|
|||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
room_id,
|
||||
|
@ -1275,7 +1290,6 @@ pub(crate) async fn invite_helper<'a>(
|
|||
room_version: room_version_id.clone(),
|
||||
event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()),
|
||||
invite_room_state,
|
||||
via: services().rooms.state_cache.servers_route_via(room_id).ok(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
@ -1327,60 +1341,60 @@ pub(crate) async fn invite_helper<'a>(
|
|||
.filter(|server| &**server != services().globals.server_name());
|
||||
|
||||
services().sending.send_pdu(servers, &pdu_id)?;
|
||||
} else {
|
||||
if !services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(sender_user, room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this room.",
|
||||
));
|
||||
}
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.await
|
||||
.entry(room_id.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
if !services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(sender_user, room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
"You don't have permission to view this room.",
|
||||
));
|
||||
}
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.await
|
||||
.entry(room_id.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
membership: MembershipState::Invite,
|
||||
displayname: services().users.displayname(user_id)?,
|
||||
avatar_url: services().users.avatar_url(user_id)?,
|
||||
is_direct: Some(is_direct),
|
||||
third_party_invite: None,
|
||||
blurhash: services().users.blurhash(user_id)?,
|
||||
reason,
|
||||
join_authorized_via_users_server: None,
|
||||
})
|
||||
.expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
room_id,
|
||||
&state_lock,
|
||||
)
|
||||
.await?;
|
||||
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
membership: MembershipState::Invite,
|
||||
displayname: services().users.displayname(user_id)?,
|
||||
avatar_url: services().users.avatar_url(user_id)?,
|
||||
is_direct: Some(is_direct),
|
||||
third_party_invite: None,
|
||||
blurhash: services().users.blurhash(user_id)?,
|
||||
reason,
|
||||
join_authorized_via_users_server: None,
|
||||
})
|
||||
.expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
},
|
||||
sender_user,
|
||||
room_id,
|
||||
&state_lock,
|
||||
)
|
||||
.await?;
|
||||
|
||||
drop(state_lock);
|
||||
// Critical point ends
|
||||
drop(state_lock);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1414,8 +1428,10 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> {
|
|||
|
||||
pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
|
||||
// Ask a remote server if we don't have this room
|
||||
if !services().rooms.metadata.exists(room_id)?
|
||||
&& room_id.server_name() != Some(services().globals.server_name())
|
||||
if !services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.server_in_room(services().globals.server_name(), room_id)?
|
||||
{
|
||||
if let Err(e) = remote_leave_room(user_id, room_id).await {
|
||||
warn!("Failed to leave room {} remotely: {}", user_id, e);
|
||||
|
@ -1438,7 +1454,6 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
|||
MembershipState::Leave,
|
||||
user_id,
|
||||
last_state,
|
||||
None,
|
||||
true,
|
||||
)?;
|
||||
} else {
|
||||
|
@ -1470,7 +1485,6 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
|||
MembershipState::Leave,
|
||||
user_id,
|
||||
None,
|
||||
None,
|
||||
true,
|
||||
)?;
|
||||
return Ok(());
|
||||
|
@ -1478,12 +1492,15 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
|||
Some(e) => e,
|
||||
};
|
||||
|
||||
let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get())
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
|
||||
|
||||
event.membership = MembershipState::Leave;
|
||||
event.reason = reason;
|
||||
event.join_authorized_via_users_server = None;
|
||||
let event = RoomMemberEventContent {
|
||||
is_direct: None,
|
||||
membership: MembershipState::Leave,
|
||||
third_party_invite: None,
|
||||
reason,
|
||||
join_authorized_via_users_server: None,
|
||||
..serde_json::from_str(member_event.content.get())
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))?
|
||||
};
|
||||
|
||||
services()
|
||||
.rooms
|
||||
|
@ -1495,6 +1512,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
|||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
user_id,
|
||||
room_id,
|
||||
|
@ -1520,21 +1538,14 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
|||
"User is not invited.",
|
||||
))?;
|
||||
|
||||
let servers: HashSet<_> = services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.servers_invite_via(&room_id)?
|
||||
.map(|servers| HashSet::from_iter(servers))
|
||||
.unwrap_or(
|
||||
invite_state
|
||||
.iter()
|
||||
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
|
||||
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
|
||||
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
|
||||
.filter_map(|sender| UserId::parse(sender).ok())
|
||||
.map(|user| user.server_name().to_owned())
|
||||
.collect(),
|
||||
);
|
||||
let servers: HashSet<_> = invite_state
|
||||
.iter()
|
||||
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
|
||||
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
|
||||
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
|
||||
.filter_map(|sender| UserId::parse(sender).ok())
|
||||
.map(|user| user.server_name().to_owned())
|
||||
.collect();
|
||||
|
||||
for remote_server in servers {
|
||||
let make_leave_response = services()
|
||||
|
@ -1603,7 +1614,7 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
|||
let event_id = EventId::parse(format!(
|
||||
"${}",
|
||||
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
|
||||
.expect("ruma can calculate reference hashes")
|
||||
.expect("Event format validated when event was hashed")
|
||||
))
|
||||
.expect("ruma's reference hashes are valid event ids");
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@ pub async fn send_message_event_route(
|
|||
&& !services().globals.allow_encryption()
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Encryption has been disabled",
|
||||
));
|
||||
}
|
||||
|
@ -84,6 +84,11 @@ pub async fn send_message_event_route(
|
|||
unsigned: Some(unsigned),
|
||||
state_key: None,
|
||||
redacts: None,
|
||||
timestamp: if body.appservice_info.is_some() {
|
||||
body.timestamp
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
|
|
|
@ -11,6 +11,7 @@ mod keys;
|
|||
mod media;
|
||||
mod membership;
|
||||
mod message;
|
||||
mod openid;
|
||||
mod presence;
|
||||
mod profile;
|
||||
mod push;
|
||||
|
@ -32,6 +33,7 @@ mod typing;
|
|||
mod unversioned;
|
||||
mod user_directory;
|
||||
mod voip;
|
||||
mod well_known;
|
||||
|
||||
pub use account::*;
|
||||
pub use alias::*;
|
||||
|
@ -46,6 +48,7 @@ pub use keys::*;
|
|||
pub use media::*;
|
||||
pub use membership::*;
|
||||
pub use message::*;
|
||||
pub use openid::*;
|
||||
pub use presence::*;
|
||||
pub use profile::*;
|
||||
pub use push::*;
|
||||
|
@ -67,6 +70,7 @@ pub use typing::*;
|
|||
pub use unversioned::*;
|
||||
pub use user_directory::*;
|
||||
pub use voip::*;
|
||||
pub use well_known::*;
|
||||
|
||||
pub const DEVICE_ID_LENGTH: usize = 10;
|
||||
pub const TOKEN_LENGTH: usize = 32;
|
||||
|
|
src/api/client_server/openid.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
use std::time::Duration;

use ruma::{api::client::account, authentication::TokenType};

use crate::{services, Result, Ruma};

/// # `POST /_matrix/client/r0/user/{userId}/openid/request_token`
///
/// Request an OpenID token to verify identity with third-party services.
///
/// - The token generated is only valid for the OpenID API.
pub async fn create_openid_token_route(
    body: Ruma<account::request_openid_token::v3::Request>,
) -> Result<account::request_openid_token::v3::Response> {
    let (access_token, expires_in) = services().users.create_openid_token(&body.user_id)?;

    Ok(account::request_openid_token::v3::Response {
        access_token,
        token_type: TokenType::Bearer,
        matrix_server_name: services().globals.server_name().to_owned(),
        expires_in: Duration::from_secs(expires_in),
    })
}
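For context, a successful call to this new endpoint serializes to the standard OpenID request_token response shape built above. The snippet below is only an illustrative sketch (serde_json assumed as a dependency): the token string, server name and expiry are made-up values, not output from the route.

use serde_json::json;

fn main() {
    // Hypothetical values; in the route above they come from
    // services().users.create_openid_token() and the configured server name.
    let response_body = json!({
        "access_token": "SomeOpaqueOpenIdToken",
        "token_type": "Bearer",
        "matrix_server_name": "example.org",
        "expires_in": 3600
    });
    println!("{response_body:#}");
}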
|
@ -65,6 +65,7 @@ pub async fn set_displayname_route(
|
|||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
room_id,
|
||||
))
|
||||
|
@ -200,6 +201,7 @@ pub async fn set_avatar_url_route(
|
|||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
room_id,
|
||||
))
|
||||
|
|
|
@ -44,6 +44,7 @@ pub async fn redact_event_route(
|
|||
unsigned: None,
|
||||
state_key: None,
|
||||
redacts: Some(body.event_id.into()),
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
|
|
|
@ -3,7 +3,7 @@ use ruma::api::client::relations::{
|
|||
get_relating_events_with_rel_type_and_event_type,
|
||||
};
|
||||
|
||||
use crate::{service::rooms::timeline::PduCount, services, Result, Ruma};
|
||||
use crate::{services, Result, Ruma};
|
||||
|
||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
|
||||
pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
||||
|
@ -11,27 +11,6 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
|||
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let from = match body.from.clone() {
|
||||
Some(from) => PduCount::try_from_string(&from)?,
|
||||
None => match ruma::api::Direction::Backward {
|
||||
// TODO: fix ruma so `body.dir` exists
|
||||
ruma::api::Direction::Forward => PduCount::min(),
|
||||
ruma::api::Direction::Backward => PduCount::max(),
|
||||
},
|
||||
};
|
||||
|
||||
let to = body
|
||||
.to
|
||||
.as_ref()
|
||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||
|
||||
// Use limit or else 10, with maximum 100
|
||||
let limit = body
|
||||
.limit
|
||||
.and_then(|u| u32::try_from(u).ok())
|
||||
.map_or(10_usize, |u| u as usize)
|
||||
.min(100);
|
||||
|
||||
let res = services()
|
||||
.rooms
|
||||
.pdu_metadata
|
||||
|
@ -41,9 +20,11 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
|||
&body.event_id,
|
||||
Some(body.event_type.clone()),
|
||||
Some(body.rel_type.clone()),
|
||||
from,
|
||||
to,
|
||||
limit,
|
||||
body.from.clone(),
|
||||
body.to.clone(),
|
||||
body.limit,
|
||||
body.recurse,
|
||||
&body.dir,
|
||||
)?;
|
||||
|
||||
Ok(
|
||||
|
@ -51,6 +32,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
|||
chunk: res.chunk,
|
||||
next_batch: res.next_batch,
|
||||
prev_batch: res.prev_batch,
|
||||
recursion_depth: res.recursion_depth,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
@ -61,27 +43,6 @@ pub async fn get_relating_events_with_rel_type_route(
|
|||
) -> Result<get_relating_events_with_rel_type::v1::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let from = match body.from.clone() {
|
||||
Some(from) => PduCount::try_from_string(&from)?,
|
||||
None => match ruma::api::Direction::Backward {
|
||||
// TODO: fix ruma so `body.dir` exists
|
||||
ruma::api::Direction::Forward => PduCount::min(),
|
||||
ruma::api::Direction::Backward => PduCount::max(),
|
||||
},
|
||||
};
|
||||
|
||||
let to = body
|
||||
.to
|
||||
.as_ref()
|
||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||
|
||||
// Use limit or else 10, with maximum 100
|
||||
let limit = body
|
||||
.limit
|
||||
.and_then(|u| u32::try_from(u).ok())
|
||||
.map_or(10_usize, |u| u as usize)
|
||||
.min(100);
|
||||
|
||||
let res = services()
|
||||
.rooms
|
||||
.pdu_metadata
|
||||
|
@ -91,15 +52,18 @@ pub async fn get_relating_events_with_rel_type_route(
|
|||
&body.event_id,
|
||||
None,
|
||||
Some(body.rel_type.clone()),
|
||||
from,
|
||||
to,
|
||||
limit,
|
||||
body.from.clone(),
|
||||
body.to.clone(),
|
||||
body.limit,
|
||||
body.recurse,
|
||||
&body.dir,
|
||||
)?;
|
||||
|
||||
Ok(get_relating_events_with_rel_type::v1::Response {
|
||||
chunk: res.chunk,
|
||||
next_batch: res.next_batch,
|
||||
prev_batch: res.prev_batch,
|
||||
recursion_depth: res.recursion_depth,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -109,27 +73,6 @@ pub async fn get_relating_events_route(
|
|||
) -> Result<get_relating_events::v1::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let from = match body.from.clone() {
|
||||
Some(from) => PduCount::try_from_string(&from)?,
|
||||
None => match ruma::api::Direction::Backward {
|
||||
// TODO: fix ruma so `body.dir` exists
|
||||
ruma::api::Direction::Forward => PduCount::min(),
|
||||
ruma::api::Direction::Backward => PduCount::max(),
|
||||
},
|
||||
};
|
||||
|
||||
let to = body
|
||||
.to
|
||||
.as_ref()
|
||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||
|
||||
// Use limit or else 10, with maximum 100
|
||||
let limit = body
|
||||
.limit
|
||||
.and_then(|u| u32::try_from(u).ok())
|
||||
.map_or(10_usize, |u| u as usize)
|
||||
.min(100);
|
||||
|
||||
services()
|
||||
.rooms
|
||||
.pdu_metadata
|
||||
|
@ -139,8 +82,10 @@ pub async fn get_relating_events_route(
|
|||
&body.event_id,
|
||||
None,
|
||||
None,
|
||||
from,
|
||||
to,
|
||||
limit,
|
||||
body.from.clone(),
|
||||
body.to.clone(),
|
||||
body.limit,
|
||||
body.recurse,
|
||||
&body.dir,
|
||||
)
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ use ruma::{
|
|||
},
|
||||
int,
|
||||
serde::JsonObject,
|
||||
CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId,
|
||||
CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId,
|
||||
};
|
||||
use serde_json::{json, value::to_raw_value};
|
||||
use std::{cmp::max, collections::BTreeMap, sync::Arc};
|
||||
|
@ -68,11 +68,11 @@ pub async fn create_room_route(
|
|||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
if !services().globals.allow_room_creation()
|
||||
&& !body.from_appservice
|
||||
&& body.appservice_info.is_none()
|
||||
&& !services().users.is_admin(sender_user)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Room creation has been disabled.",
|
||||
));
|
||||
}
|
||||
|
@ -104,6 +104,22 @@ pub async fn create_room_route(
|
|||
}
|
||||
})?;
|
||||
|
||||
if let Some(ref alias) = alias {
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.aliases.is_match(alias.as_str()) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else if services().appservice.is_exclusive_alias(alias).await {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias reserved by appservice.",
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let room_version = match body.room_version.clone() {
|
||||
Some(room_version) => {
|
||||
if services()
|
||||
|
@ -127,12 +143,29 @@ pub async fn create_room_route(
|
|||
let mut content = content
|
||||
.deserialize_as::<CanonicalJsonObject>()
|
||||
.expect("Invalid creation content");
|
||||
content.insert(
|
||||
"creator".into(),
|
||||
json!(&sender_user).try_into().map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
|
||||
})?,
|
||||
);
|
||||
|
||||
match room_version {
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
| RoomVersionId::V7
|
||||
| RoomVersionId::V8
|
||||
| RoomVersionId::V9
|
||||
| RoomVersionId::V10 => {
|
||||
content.insert(
|
||||
"creator".into(),
|
||||
json!(&sender_user).try_into().map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
|
||||
})?,
|
||||
);
|
||||
}
|
||||
RoomVersionId::V11 => {} // V11 removed the "creator" key
|
||||
_ => unreachable!("Validity of room version already checked"),
|
||||
}
|
||||
|
||||
content.insert(
|
||||
"room_version".into(),
|
||||
json!(room_version.as_str()).try_into().map_err(|_| {
|
||||
|
@@ -142,9 +175,22 @@ pub async fn create_room_route(
            content
        }
        None => {
            // TODO: Add correct value for v11
            let content = match room_version {
                RoomVersionId::V1
                | RoomVersionId::V2
                | RoomVersionId::V3
                | RoomVersionId::V4
                | RoomVersionId::V5
                | RoomVersionId::V6
                | RoomVersionId::V7
                | RoomVersionId::V8
                | RoomVersionId::V9
                | RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()),
                RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
                _ => unreachable!("Validity of room version already checked"),
            };
            let mut content = serde_json::from_str::<CanonicalJsonObject>(
                to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone()))
                to_raw_value(&content)
                    .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
                    .get(),
            )
|
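The two hunks above make the initial m.room.create content depend on the room version. As a rough sketch (the user id and version strings are illustrative, serde_json assumed), rooms up to version 10 still carry the creator field, while version 11 drops it and derives the creator from the event sender:

use serde_json::json;

fn main() {
    let sender = "@alice:example.org"; // hypothetical room creator

    // Room versions 1 through 10: "creator" is part of the create event content.
    let create_v10 = json!({ "creator": sender, "room_version": "10" });

    // Room version 11 removed the "creator" key entirely.
    let create_v11 = json!({ "room_version": "11" });

    println!("{create_v10}\n{create_v11}");
}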
@ -184,6 +230,7 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
|
@ -212,6 +259,7 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
|
@ -265,6 +313,7 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
|
@ -288,6 +337,7 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
|
@ -314,6 +364,7 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
|
@ -335,6 +386,7 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
|
@ -357,6 +409,7 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
|
@ -401,6 +454,7 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
|
@ -423,6 +477,7 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
|
@ -439,7 +494,10 @@ pub async fn create_room_route(
|
|||
|
||||
// Homeserver specific stuff
|
||||
if let Some(alias) = alias {
|
||||
services().rooms.alias.set_alias(&alias, &room_id)?;
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.set_alias(&alias, &room_id, sender_user)?;
|
||||
}
|
||||
|
||||
if body.visibility == room::Visibility::Public {
|
||||
|
@ -476,7 +534,7 @@ pub async fn get_room_event_route(
|
|||
&body.event_id,
|
||||
)? {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this event.",
|
||||
));
|
||||
}
|
||||
|
@ -505,7 +563,7 @@ pub async fn get_room_aliases_route(
|
|||
.is_joined(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this room.",
|
||||
));
|
||||
}
|
||||
|
@ -580,6 +638,7 @@ pub async fn upgrade_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
|
@ -619,12 +678,30 @@ pub async fn upgrade_room_route(
|
|||
));
|
||||
|
||||
// Send a m.room.create event containing a predecessor field and the applicable room_version
|
||||
create_event_content.insert(
|
||||
"creator".into(),
|
||||
json!(&sender_user)
|
||||
.try_into()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
|
||||
);
|
||||
match body.new_version {
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
| RoomVersionId::V7
|
||||
| RoomVersionId::V8
|
||||
| RoomVersionId::V9
|
||||
| RoomVersionId::V10 => {
|
||||
create_event_content.insert(
|
||||
"creator".into(),
|
||||
json!(&sender_user).try_into().map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::BadJson, "Error forming creation event")
|
||||
})?,
|
||||
);
|
||||
}
|
||||
RoomVersionId::V11 => {
|
||||
// "creator" key no longer exists in V11 rooms
|
||||
create_event_content.remove("creator");
|
||||
}
|
||||
_ => unreachable!("Validity of room version already checked"),
|
||||
}
|
||||
create_event_content.insert(
|
||||
"room_version".into(),
|
||||
json!(&body.new_version)
|
||||
|
@ -663,6 +740,7 @@ pub async fn upgrade_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&replacement_room,
|
||||
|
@ -691,6 +769,7 @@ pub async fn upgrade_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&replacement_room,
|
||||
|
@ -733,6 +812,7 @@ pub async fn upgrade_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&replacement_room,
|
||||
|
@ -751,7 +831,7 @@ pub async fn upgrade_room_route(
|
|||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.set_alias(&alias, &replacement_room)?;
|
||||
.set_alias(&alias, &replacement_room, sender_user)?;
|
||||
}
|
||||
|
||||
// Get the old room power levels
|
||||
|
@ -783,6 +863,7 @@ pub async fn upgrade_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
|
|
|
@ -43,7 +43,7 @@ pub async fn search_events_route(
|
|||
.is_joined(sender_user, &room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this room.",
|
||||
));
|
||||
}
|
||||
|
@ -89,11 +89,12 @@ pub async fn search_events_route(
|
|||
.get_pdu_from_id(result)
|
||||
.ok()?
|
||||
.filter(|pdu| {
|
||||
services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
|
||||
.unwrap_or(false)
|
||||
!pdu.is_redacted()
|
||||
&& services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.map(|pdu| pdu.to_room_event())
|
||||
})
|
||||
|
|
|
@ -63,15 +63,22 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||
UserId::parse(user)
|
||||
} else {
|
||||
warn!("Bad login type: {:?}", &body.login_info);
|
||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
|
||||
}
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
|
||||
|
||||
if services().appservice.is_exclusive_user_id(&user_id).await {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User id reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
let hash = services()
|
||||
.users
|
||||
.password_hash(&user_id)?
|
||||
.ok_or(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Wrong username or password.",
|
||||
))?;
|
||||
|
||||
|
@ -86,7 +93,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||
|
||||
if !hash_matches {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Wrong username or password.",
|
||||
));
|
||||
}
|
||||
|
@ -102,9 +109,20 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||
)
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
|
||||
let username = token.claims.sub.to_lowercase();
|
||||
UserId::parse_with_server_name(username, services().globals.server_name()).map_err(
|
||||
|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
|
||||
)?
|
||||
let user_id =
|
||||
UserId::parse_with_server_name(username, services().globals.server_name())
|
||||
.map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
||||
})?;
|
||||
|
||||
if services().appservice.is_exclusive_user_id(&user_id).await {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User id reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
user_id
|
||||
} else {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Unknown,
|
||||
|
@ -116,13 +134,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||
identifier,
|
||||
user,
|
||||
}) => {
|
||||
if !body.from_appservice {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::MissingToken,
|
||||
"Missing appservice token.",
|
||||
));
|
||||
};
|
||||
if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
||||
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
||||
UserId::parse_with_server_name(
|
||||
user_id.to_lowercase(),
|
||||
services().globals.server_name(),
|
||||
|
@ -131,9 +143,25 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||
UserId::parse(user)
|
||||
} else {
|
||||
warn!("Bad login type: {:?}", &body.login_info);
|
||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
|
||||
}
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
|
||||
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.is_user_match(&user_id) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::MissingToken,
|
||||
"Missing appservice token.",
|
||||
));
|
||||
}
|
||||
|
||||
user_id
|
||||
}
|
||||
_ => {
|
||||
warn!("Unsupported or unknown login type: {:?}", &body.login_info);
|
||||
|
@ -199,6 +227,15 @@ pub async fn logout_route(body: Ruma<logout::v3::Request>) -> Result<logout::v3:
|
|||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.is_user_match(sender_user) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User is not in namespace.",
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
services().users.remove_device(sender_user, sender_device)?;
|
||||
|
||||
Ok(logout::v3::Response::new())
|
||||
|
@ -220,6 +257,20 @@ pub async fn logout_all_route(
|
|||
) -> Result<logout_all::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.is_user_match(sender_user) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::MissingToken,
|
||||
"Missing appservice token.",
|
||||
));
|
||||
}
|
||||
|
||||
for device_id in services().users.all_device_ids(sender_user).flatten() {
|
||||
services().users.remove_device(sender_user, &device_id)?;
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ use ruma::{
|
|||
room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
|
||||
},
|
||||
serde::Raw,
|
||||
EventId, RoomId, UserId,
|
||||
EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId,
|
||||
};
|
||||
use tracing::log::warn;
|
||||
|
||||
|
@ -32,6 +32,11 @@ pub async fn send_state_event_for_key_route(
|
|||
&body.event_type,
|
||||
&body.body.body, // Yes, I hate it too
|
||||
body.state_key.to_owned(),
|
||||
if body.appservice_info.is_some() {
|
||||
body.timestamp
|
||||
} else {
|
||||
None
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
@ -54,7 +59,7 @@ pub async fn send_state_event_for_empty_key_route(
|
|||
// Forbid m.room.encryption if encryption is disabled
|
||||
if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Encryption has been disabled",
|
||||
));
|
||||
}
|
||||
|
@ -65,6 +70,11 @@ pub async fn send_state_event_for_empty_key_route(
|
|||
&body.event_type.to_string().into(),
|
||||
&body.body.body,
|
||||
body.state_key.to_owned(),
|
||||
if body.appservice_info.is_some() {
|
||||
body.timestamp
|
||||
} else {
|
||||
None
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
@ -88,7 +98,7 @@ pub async fn get_state_events_route(
|
|||
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view the room state.",
|
||||
));
|
||||
}
|
||||
|
@ -121,7 +131,7 @@ pub async fn get_state_events_for_key_route(
|
|||
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view the room state.",
|
||||
));
|
||||
}
|
||||
|
@ -160,7 +170,7 @@ pub async fn get_state_events_for_empty_key_route(
|
|||
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view the room state.",
|
||||
));
|
||||
}
|
||||
|
@ -190,6 +200,7 @@ async fn send_state_event_for_key_helper(
|
|||
event_type: &StateEventType,
|
||||
json: &Raw<AnyStateEventContent>,
|
||||
state_key: String,
|
||||
timestamp: Option<MilliSecondsSinceUnixEpoch>,
|
||||
) -> Result<Arc<EventId>> {
|
||||
let sender_user = sender;
|
||||
|
||||
|
@ -214,7 +225,7 @@ async fn send_state_event_for_key_helper(
|
|||
.is_none()
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You are only allowed to send canonical_alias \
|
||||
events when it's aliases already exists",
|
||||
));
|
||||
|
@ -243,6 +254,7 @@ async fn send_state_event_for_key_helper(
|
|||
unsigned: None,
|
||||
state_key: Some(state_key),
|
||||
redacts: None,
|
||||
timestamp,
|
||||
},
|
||||
sender_user,
|
||||
room_id,
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
use crate::{
|
||||
service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse,
|
||||
service::{pdu::EventHash, rooms::timeline::PduCount},
|
||||
services, utils, Error, PduEvent, Result, Ruma, RumaResponse,
|
||||
};
|
||||
|
||||
use ruma::{
|
||||
|
@ -11,7 +12,7 @@ use ruma::{
|
|||
Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom,
|
||||
LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice,
|
||||
},
|
||||
v4::SlidingOp,
|
||||
v4::{SlidingOp, SlidingSyncRoomHero},
|
||||
DeviceLists, UnreadNotificationsCount,
|
||||
},
|
||||
uiaa::UiaaResponse,
|
||||
|
@ -21,7 +22,7 @@ use ruma::{
|
|||
StateEventType, TimelineEventType,
|
||||
},
|
||||
serde::Raw,
|
||||
uint, DeviceId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
|
||||
uint, DeviceId, EventId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
|
||||
};
|
||||
use std::{
|
||||
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
|
||||
|
@ -296,8 +297,6 @@ async fn sync_helper(
|
|||
for result in all_left_rooms {
|
||||
let (room_id, _) = result?;
|
||||
|
||||
let mut left_state_events = Vec::new();
|
||||
|
||||
{
|
||||
// Get and drop the lock to wait for remaining operations to finish
|
||||
let mutex_insert = Arc::clone(
|
||||
|
@ -325,9 +324,48 @@ async fn sync_helper(
|
|||
|
||||
if !services().rooms.metadata.exists(&room_id)? {
|
||||
// This is just a rejected invite, not a room we know
|
||||
let event = PduEvent {
|
||||
event_id: EventId::new(services().globals.server_name()).into(),
|
||||
sender: sender_user.clone(),
|
||||
origin_server_ts: utils::millis_since_unix_epoch()
|
||||
.try_into()
|
||||
.expect("Timestamp is valid js_int value"),
|
||||
kind: TimelineEventType::RoomMember,
|
||||
content: serde_json::from_str(r#"{ "membership": "leave"}"#).unwrap(),
|
||||
state_key: Some(sender_user.to_string()),
|
||||
unsigned: None,
|
||||
// The following keys are dropped on conversion
|
||||
room_id: room_id.clone(),
|
||||
prev_events: vec![],
|
||||
depth: uint!(1),
|
||||
auth_events: vec![],
|
||||
redacts: None,
|
||||
hashes: EventHash {
|
||||
sha256: String::new(),
|
||||
},
|
||||
signatures: None,
|
||||
};
|
||||
|
||||
left_rooms.insert(
|
||||
room_id,
|
||||
LeftRoom {
|
||||
account_data: RoomAccountData { events: Vec::new() },
|
||||
timeline: Timeline {
|
||||
limited: false,
|
||||
prev_batch: Some(next_batch_string.clone()),
|
||||
events: Vec::new(),
|
||||
},
|
||||
state: State {
|
||||
events: vec![event.to_sync_state_event()],
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut left_state_events = Vec::new();
|
||||
|
||||
let since_shortstatehash = services()
|
||||
.rooms
|
||||
.user
|
||||
|
@ -678,7 +716,7 @@ async fn load_joined_room(
|
|||
.state_cache
|
||||
.is_invited(&user_id, room_id)?)
|
||||
{
|
||||
Ok::<_, Error>(Some(state_key.clone()))
|
||||
Ok::<_, Error>(Some(user_id))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
|
@ -1534,7 +1572,7 @@ pub async fn sync_events_v4_route(
|
|||
sender_user.clone(),
|
||||
sender_device.clone(),
|
||||
conn_id.clone(),
|
||||
body.room_subscriptions,
|
||||
body.room_subscriptions.clone(),
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -1600,33 +1638,37 @@ pub async fn sync_events_v4_route(
|
|||
.get_member(room_id, &member)
|
||||
.ok()
|
||||
.flatten()
|
||||
.map(|memberevent| {
|
||||
(
|
||||
memberevent
|
||||
.displayname
|
||||
.unwrap_or_else(|| member.to_string()),
|
||||
memberevent.avatar_url,
|
||||
)
|
||||
.map(|memberevent| SlidingSyncRoomHero {
|
||||
user_id: member,
|
||||
name: memberevent.displayname,
|
||||
avatar: memberevent.avatar_url,
|
||||
})
|
||||
})
|
||||
.take(5)
|
||||
.collect::<Vec<_>>();
|
||||
let name = match &heroes[..] {
|
||||
[] => None,
|
||||
[only] => Some(only.0.clone()),
|
||||
[only] => Some(
|
||||
only.name
|
||||
.clone()
|
||||
.unwrap_or_else(|| only.user_id.to_string()),
|
||||
),
|
||||
[firsts @ .., last] => Some(
|
||||
firsts
|
||||
.iter()
|
||||
.map(|h| h.0.clone())
|
||||
.map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string()))
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
+ " and "
|
||||
+ &last.0,
|
||||
+ &last
|
||||
.name
|
||||
.clone()
|
||||
.unwrap_or_else(|| last.user_id.to_string()),
|
||||
),
|
||||
};
|
||||
|
||||
let avatar = if let [only] = &heroes[..] {
|
||||
only.1.clone()
|
||||
only.avatar.clone()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
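For readers skimming the hunk above: heroes are now SlidingSyncRoomHero values, and the computed room name falls back to a hero's user id whenever no display name is set. The standalone function below restates that naming rule with deliberately simplified types (plain strings instead of the ruma types); it is a sketch, not code from the diff.

// Sketch only: (user_id, display_name) pairs stand in for SlidingSyncRoomHero.
fn room_name_from_heroes(heroes: &[(String, Option<String>)]) -> Option<String> {
    let label = |(user_id, name): &(String, Option<String>)| {
        name.clone().unwrap_or_else(|| user_id.clone())
    };
    match heroes {
        [] => None,
        [only] => Some(label(only)),
        [firsts @ .., last] => Some(
            firsts.iter().map(label).collect::<Vec<_>>().join(", ") + " and " + &label(last),
        ),
    }
}

fn main() {
    let heroes = vec![
        ("@alice:example.org".to_owned(), Some("Alice".to_owned())),
        ("@bob:example.org".to_owned(), None),
    ];
    // Prints: Some("Alice and @bob:example.org")
    println!("{:?}", room_name_from_heroes(&heroes));
}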
@ -1687,6 +1729,16 @@ pub async fn sync_events_v4_route(
|
|||
),
|
||||
num_live: None, // Count events in timeline greater than global sync counter
|
||||
timestamp: None,
|
||||
heroes: if body
|
||||
.room_subscriptions
|
||||
.get(room_id)
|
||||
.map(|sub| sub.include_heroes.unwrap_or_default())
|
||||
.unwrap_or_default()
|
||||
{
|
||||
Some(heroes)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ pub async fn create_typing_event_route(
|
|||
.is_joined(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You are not in this room.",
|
||||
));
|
||||
}
|
||||
|
|
|
@@ -1,9 +1,8 @@
use std::{collections::BTreeMap, iter::FromIterator};

use axum::{response::IntoResponse, Json};
use ruma::api::client::{discovery::get_supported_versions, error::ErrorKind};
use ruma::api::client::discovery::get_supported_versions;

use crate::{services, Error, Result, Ruma};
use crate::{Result, Ruma};

/// # `GET /_matrix/client/versions`
///
@@ -28,23 +27,11 @@ pub async fn get_supported_versions_route(
            "v1.4".to_owned(),
            "v1.5".to_owned(),
        ],
        unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
        unstable_features: BTreeMap::from_iter([
            ("org.matrix.e2e_cross_signing".to_owned(), true),
            ("org.matrix.msc3916.stable".to_owned(), true),
        ]),
    };

    Ok(resp)
}

/// # `GET /.well-known/matrix/client`
pub async fn well_known_client_route(
    _body: Ruma<get_supported_versions::Request>,
) -> Result<impl IntoResponse> {
    let client_url = match services().globals.well_known_client() {
        Some(url) => url.clone(),
        None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
    };

    Ok(Json(serde_json::json!({
        "m.homeserver": {"base_url": client_url},
        "org.matrix.msc3575.proxy": {"url": client_url}
    })))
}
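With the change above, a GET /_matrix/client/versions response from this build also advertises the org.matrix.msc3916.stable flag. The JSON below is a hand-written sketch of that response, with the version list trimmed to the two entries visible in the hunk; it is not captured server output.

use serde_json::json;

fn main() {
    // Sketch of the response body; only fields shown in the diff are included.
    let versions_response = json!({
        "versions": ["v1.4", "v1.5"],
        "unstable_features": {
            "org.matrix.e2e_cross_signing": true,
            "org.matrix.msc3916.stable": true
        }
    });
    println!("{versions_response:#}");
}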
src/api/client_server/well_known.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
use ruma::api::client::discovery::discover_homeserver::{
    self, HomeserverInfo, SlidingSyncProxyInfo,
};

use crate::{services, Result, Ruma};

/// # `GET /.well-known/matrix/client`
///
/// Returns the client server discovery information.
pub async fn well_known_client(
    _body: Ruma<discover_homeserver::Request>,
) -> Result<discover_homeserver::Response> {
    let client_url = services().globals.well_known_client();

    Ok(discover_homeserver::Response {
        homeserver: HomeserverInfo {
            base_url: client_url.clone(),
        },
        identity_server: None,
        sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }),
    })
}
|
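Serialized, the discovery response above corresponds to the same JSON document that the removed well_known_client_route used to assemble by hand. A sketch with a hypothetical base URL (serde_json assumed):

use serde_json::json;

fn main() {
    // Hypothetical URL; the real value comes from services().globals.well_known_client().
    let client_url = "https://matrix.example.org";
    let well_known = json!({
        "m.homeserver": { "base_url": client_url },
        "org.matrix.msc3575.proxy": { "url": client_url }
    });
    println!("{well_known:#}");
}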
@ -2,20 +2,22 @@ use std::{collections::BTreeMap, iter::FromIterator, str};
|
|||
|
||||
use axum::{
|
||||
async_trait,
|
||||
body::{Full, HttpBody},
|
||||
extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader},
|
||||
headers::{
|
||||
authorization::{Bearer, Credentials},
|
||||
Authorization,
|
||||
},
|
||||
body::Body,
|
||||
extract::{FromRequest, Path},
|
||||
response::{IntoResponse, Response},
|
||||
BoxError, RequestExt, RequestPartsExt,
|
||||
RequestExt, RequestPartsExt,
|
||||
};
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use axum_extra::{
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
typed_header::TypedHeaderRejectionReason,
|
||||
TypedHeader,
|
||||
};
|
||||
use bytes::{BufMut, BytesMut};
|
||||
use http::{Request, StatusCode};
|
||||
use ruma::{
|
||||
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
|
||||
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
|
||||
server_util::authorization::XMatrix,
|
||||
CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use tracing::{debug, error, warn};
|
||||
|
@ -31,37 +33,33 @@ enum Token {
|
|||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T, S, B> FromRequest<S, B> for Ruma<T>
|
||||
impl<T, S> FromRequest<S> for Ruma<T>
|
||||
where
|
||||
T: IncomingRequest,
|
||||
B: HttpBody + Send + 'static,
|
||||
B::Data: Send,
|
||||
B::Error: Into<BoxError>,
|
||||
{
|
||||
type Rejection = Error;
|
||||
|
||||
async fn from_request(req: Request<B>, _state: &S) -> Result<Self, Self::Rejection> {
|
||||
async fn from_request(req: Request<Body>, _state: &S) -> Result<Self, Self::Rejection> {
|
||||
#[derive(Deserialize)]
|
||||
struct QueryParams {
|
||||
access_token: Option<String>,
|
||||
user_id: Option<String>,
|
||||
}
|
||||
|
||||
let (mut parts, mut body) = match req.with_limited_body() {
|
||||
Ok(limited_req) => {
|
||||
let (parts, body) = limited_req.into_parts();
|
||||
let body = to_bytes(body)
|
||||
.await
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
||||
(parts, body)
|
||||
}
|
||||
Err(original_req) => {
|
||||
let (parts, body) = original_req.into_parts();
|
||||
let body = to_bytes(body)
|
||||
.await
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
||||
(parts, body)
|
||||
}
|
||||
let (mut parts, mut body) = {
|
||||
let limited_req = req.with_limited_body();
|
||||
let (parts, body) = limited_req.into_parts();
|
||||
let body = axum::body::to_bytes(
|
||||
body,
|
||||
services()
|
||||
.globals
|
||||
.max_request_size()
|
||||
.try_into()
|
||||
.unwrap_or(usize::MAX),
|
||||
)
|
||||
.await
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
||||
(parts, body)
|
||||
};
|
||||
|
||||
let metadata = T::METADATA;
|
||||
|
@ -99,21 +97,20 @@ where
|
|||
|
||||
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
|
||||
|
||||
let (sender_user, sender_device, sender_servername, from_appservice) =
|
||||
let (sender_user, sender_device, sender_servername, appservice_info) =
|
||||
match (metadata.authentication, token) {
|
||||
(_, Token::Invalid) => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::UnknownToken { soft_logout: false },
|
||||
"Unknown access token.",
|
||||
))
|
||||
// OpenID endpoint uses a query param with the same name, drop this once query params for user auth are removed from the spec
|
||||
if query_params.access_token.is_some() {
|
||||
(None, None, None, None)
|
||||
} else {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::UnknownToken { soft_logout: false },
|
||||
"Unknown access token.",
|
||||
));
|
||||
}
|
||||
}
|
||||
(
|
||||
AuthScheme::AccessToken
|
||||
| AuthScheme::AppserviceToken
|
||||
| AuthScheme::AccessTokenOptional
|
||||
| AuthScheme::None,
|
||||
Token::Appservice(info),
|
||||
) => {
|
||||
(AuthScheme::AccessToken, Token::Appservice(info)) => {
|
||||
let user_id = query_params
|
||||
.user_id
|
||||
.map_or_else(
|
||||
|
@ -128,16 +125,29 @@ where
|
|||
.map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
||||
})?;
|
||||
|
||||
if !info.is_user_match(&user_id) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User is not in namespace.",
|
||||
));
|
||||
}
|
||||
|
||||
if !services().users.exists(&user_id)? {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"User does not exist.",
|
||||
));
|
||||
}
|
||||
|
||||
// TODO: Check if appservice is allowed to be that user
|
||||
(Some(user_id), None, None, true)
|
||||
(Some(user_id), None, None, Some(*info))
|
||||
}
|
||||
(
|
||||
AuthScheme::None
|
||||
| AuthScheme::AppserviceToken
|
||||
| AuthScheme::AccessTokenOptional,
|
||||
Token::Appservice(info),
|
||||
) => (None, None, None, Some(*info)),
|
||||
(AuthScheme::AccessToken, Token::None) => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::MissingToken,
|
||||
|
@ -147,12 +157,8 @@ where
|
|||
(
|
||||
AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None,
|
||||
Token::User((user_id, device_id)),
|
||||
) => (Some(user_id), Some(device_id), None, false),
|
||||
) => (Some(user_id), Some(device_id), None, None),
|
||||
(AuthScheme::ServerSignatures, Token::None) => {
|
||||
if !services().globals.allow_federation() {
|
||||
return Err(Error::bad_config("Federation is disabled."));
|
||||
}
|
||||
|
||||
let TypedHeader(Authorization(x_matrix)) = parts
|
||||
.extract::<TypedHeader<Authorization<XMatrix>>>()
|
||||
.await
|
||||
|
@ -169,17 +175,31 @@ where
|
|||
_ => "Unknown header-related error",
|
||||
};
|
||||
|
||||
Error::BadRequest(ErrorKind::Forbidden, msg)
|
||||
Error::BadRequest(ErrorKind::forbidden(), msg)
|
||||
})?;
|
||||
|
||||
if let Some(dest) = x_matrix.destination {
|
||||
if dest != services().globals.server_name() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Unauthorized,
|
||||
"X-Matrix destination field does not match server name.",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let origin_signatures = BTreeMap::from_iter([(
|
||||
x_matrix.key.clone(),
|
||||
CanonicalJsonValue::String(x_matrix.sig),
|
||||
CanonicalJsonValue::String(x_matrix.sig.to_string()),
|
||||
)]);
|
||||
|
||||
let signatures = BTreeMap::from_iter([(
|
||||
x_matrix.origin.as_str().to_owned(),
|
||||
CanonicalJsonValue::Object(origin_signatures),
|
||||
CanonicalJsonValue::Object(
|
||||
origin_signatures
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v))
|
||||
.collect(),
|
||||
),
|
||||
)]);
|
||||
|
||||
let mut request_map = BTreeMap::from_iter([
|
||||
|
@ -214,7 +234,7 @@ where
|
|||
let keys_result = services()
|
||||
.rooms
|
||||
.event_handler
|
||||
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
|
||||
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false)
|
||||
.await;
|
||||
|
||||
let keys = match keys_result {
|
||||
|
@ -222,17 +242,28 @@ where
|
|||
Err(e) => {
|
||||
warn!("Failed to fetch signing keys: {}", e);
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Failed to fetch signing keys.",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let pub_key_map =
|
||||
BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
|
||||
// Only verify_keys that are currently valid should be used for validating requests
|
||||
// as per MSC4029
|
||||
let pub_key_map = BTreeMap::from_iter([(
|
||||
x_matrix.origin.as_str().to_owned(),
|
||||
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
|
||||
keys.verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id, key.key))
|
||||
.collect()
|
||||
} else {
|
||||
BTreeMap::new()
|
||||
},
|
||||
)]);
|
||||
|
||||
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
|
||||
Ok(()) => (None, None, Some(x_matrix.origin), false),
|
||||
Ok(()) => (None, None, Some(x_matrix.origin), None),
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Failed to verify json request from {}: {}\n{:?}",
|
||||
|
@ -248,7 +279,7 @@ where
|
|||
}
|
||||
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Failed to verify X-Matrix signatures.",
|
||||
));
|
||||
}
|
||||
|
@ -259,7 +290,7 @@ where
|
|||
| AuthScheme::AppserviceToken
|
||||
| AuthScheme::AccessTokenOptional,
|
||||
Token::None,
|
||||
) => (None, None, None, false),
|
||||
) => (None, None, None, None),
|
||||
(AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Unauthorized,
|
||||
|
@ -274,7 +305,7 @@ where
|
|||
}
|
||||
};
|
||||
|
||||
let mut http_request = http::Request::builder().uri(parts.uri).method(parts.method);
|
||||
let mut http_request = Request::builder().uri(parts.uri).method(parts.method);
|
||||
*http_request.headers_mut().unwrap() = parts.headers;
|
||||
|
||||
if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
|
||||
|
@ -322,126 +353,17 @@ where
|
|||
sender_user,
|
||||
sender_device,
|
||||
sender_servername,
|
||||
from_appservice,
|
||||
appservice_info,
|
||||
json_body,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct XMatrix {
    origin: OwnedServerName,
    key: String, // KeyName?
    sig: String,
}

impl Credentials for XMatrix {
    const SCHEME: &'static str = "X-Matrix";

    fn decode(value: &http::HeaderValue) -> Option<Self> {
        debug_assert!(
            value.as_bytes().starts_with(b"X-Matrix "),
            "HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}",
        );

        let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
            .ok()?
            .trim_start();

        let mut origin = None;
        let mut key = None;
        let mut sig = None;

        for entry in parameters.split_terminator(',') {
            let (name, value) = entry.split_once('=')?;

            // It's not at all clear why some fields are quoted and others not in the spec,
            // let's simply accept either form for every field.
            let value = value
                .strip_prefix('"')
                .and_then(|rest| rest.strip_suffix('"'))
                .unwrap_or(value);

            // FIXME: Catch multiple fields of the same name
            match name {
                "origin" => origin = Some(value.try_into().ok()?),
                "key" => key = Some(value.to_owned()),
                "sig" => sig = Some(value.to_owned()),
                _ => debug!(
                    "Unexpected field `{}` in X-Matrix Authorization header",
                    name
                ),
            }
        }

        Some(Self {
            origin: origin?,
            key: key?,
            sig: sig?,
        })
    }

    fn encode(&self) -> http::HeaderValue {
        todo!()
    }
}
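For reference, a hedged sketch of the header shape this decoder accepts; the origin, key ID, and signature below are invented, and the test assumes it sits in the same module as `XMatrix` with the `Credentials` trait in scope:

#[cfg(test)]
mod x_matrix_decode_sketch {
    use super::*;

    #[test]
    fn accepts_quoted_and_unquoted_parameters() {
        let value = http::HeaderValue::from_static(
            r#"X-Matrix origin=other.server.name,key="ed25519:abc",sig="c2lnbmF0dXJl""#,
        );

        let x_matrix = XMatrix::decode(&value).expect("header should parse");
        assert_eq!(x_matrix.origin.as_str(), "other.server.name");
        assert_eq!(x_matrix.key, "ed25519:abc"); // surrounding quotes are stripped
        assert_eq!(x_matrix.sig, "c2lnbmF0dXJl");
    }
}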
|
||||
|
||||
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
|
||||
fn into_response(self) -> Response {
|
||||
match self.0.try_into_http_response::<BytesMut>() {
|
||||
Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
|
||||
Ok(res) => res.map(BytesMut::freeze).map(Body::from).into_response(),
|
||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// copied from hyper under the following license:
|
||||
// Copyright (c) 2014-2021 Sean McArthur
|
||||
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
pub(crate) async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
where
    T: HttpBody,
{
    futures_util::pin_mut!(body);

    // If there's only 1 chunk, we can just return Buf::to_bytes()
    let mut first = if let Some(buf) = body.data().await {
        buf?
    } else {
        return Ok(Bytes::new());
    };

    let second = if let Some(buf) = body.data().await {
        buf?
    } else {
        return Ok(first.copy_to_bytes(first.remaining()));
    };

    // With more than 1 buf, we gotta flatten into a Vec first.
    let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
    let mut vec = Vec::with_capacity(cap);
    vec.put(first);
    vec.put(second);

    while let Some(buf) = body.data().await {
        vec.put(buf?);
    }

    Ok(vec.into())
}
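A rough call-site sketch for the helper above (the `incoming_body` variable and the error mapping are placeholders, not taken from the diff):

// Flatten a streaming request body into one contiguous `Bytes` buffer
// before attempting to parse it as canonical JSON.
let body_bytes = to_bytes(incoming_body)
    .await
    .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
let json_body = serde_json::from_slice::<CanonicalJsonValue>(&body_bytes).ok();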
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use crate::Error;
|
||||
use crate::{service::appservice::RegistrationInfo, Error};
|
||||
use ruma::{
|
||||
api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName,
|
||||
OwnedUserId,
|
||||
|
@ -16,7 +16,7 @@ pub struct Ruma<T> {
|
|||
pub sender_servername: Option<OwnedServerName>,
|
||||
// This is None when body is not a valid string
|
||||
pub json_body: Option<CanonicalJsonValue>,
|
||||
pub from_appservice: bool,
|
||||
pub appservice_info: Option<RegistrationInfo>,
|
||||
}
|
||||
|
||||
impl<T> Deref for Ruma<T> {
|
||||
|
|
File diff suppressed because it is too large

src/clap.rs (16 lines changed)
|
@@ -2,9 +2,23 @@

use clap::Parser;

/// Returns the current version of the crate with extra info if supplied
///
/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to
/// include it in parentheses after the SemVer version. A common value is a git
/// commit hash.
fn version() -> String {
    let cargo_pkg_version = env!("CARGO_PKG_VERSION");

    match option_env!("CONDUIT_VERSION_EXTRA") {
        Some(x) => format!("{} ({})", cargo_pkg_version, x),
        None => cargo_pkg_version.to_owned(),
    }
}
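For illustration, the build-time inputs map to the reported version string roughly like this (version number and hash are placeholders, not taken from the diff):

// CARGO_PKG_VERSION = "0.9.0", CONDUIT_VERSION_EXTRA unset        -> "0.9.0"
// CARGO_PKG_VERSION = "0.9.0", CONDUIT_VERSION_EXTRA = "3c93c81"  -> "0.9.0 (3c93c81)"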
/// Command line arguments
#[derive(Parser)]
#[clap(about, version)]
#[clap(about, version = version())]
pub struct Args {}

/// Parse command line arguments into structured data
|
||||
|
|
|
@ -7,6 +7,7 @@ use std::{
|
|||
use ruma::{OwnedServerName, RoomVersionId};
|
||||
use serde::{de::IgnoredAny, Deserialize};
|
||||
use tracing::warn;
|
||||
use url::Url;
|
||||
|
||||
mod proxy;
|
||||
|
||||
|
@ -21,7 +22,6 @@ pub struct Config {
|
|||
pub tls: Option<TlsConfig>,
|
||||
|
||||
pub server_name: OwnedServerName,
|
||||
#[serde(default = "default_database_backend")]
|
||||
pub database_backend: String,
|
||||
pub database_path: String,
|
||||
#[serde(default = "default_db_cache_capacity_mb")]
|
||||
|
@ -47,6 +47,8 @@ pub struct Config {
|
|||
#[serde(default = "false_fn")]
|
||||
pub allow_registration: bool,
|
||||
pub registration_token: Option<String>,
|
||||
#[serde(default = "default_openid_token_ttl")]
|
||||
pub openid_token_ttl: u64,
|
||||
#[serde(default = "true_fn")]
|
||||
pub allow_encryption: bool,
|
||||
#[serde(default = "false_fn")]
|
||||
|
@ -57,7 +59,8 @@ pub struct Config {
|
|||
pub allow_unstable_room_versions: bool,
|
||||
#[serde(default = "default_default_room_version")]
|
||||
pub default_room_version: RoomVersionId,
|
||||
pub well_known_client: Option<String>,
|
||||
#[serde(default, flatten)]
|
||||
pub well_known: WellKnownConfig,
|
||||
#[serde(default = "false_fn")]
|
||||
pub allow_jaeger: bool,
|
||||
#[serde(default = "false_fn")]
|
||||
|
@ -92,6 +95,14 @@ pub struct TlsConfig {
|
|||
pub key: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Default)]
|
||||
pub struct WellKnownConfig {
|
||||
#[serde(rename = "well_known_client")]
|
||||
pub client: Option<Url>,
|
||||
#[serde(rename = "well_known_server")]
|
||||
pub server: Option<OwnedServerName>,
|
||||
}
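Given the `flatten` and `rename` attributes above, the corresponding config keys should sit directly in the existing config section rather than in a nested table; this is an assumption based on the serde attributes, and the values are illustrative only:

// Sketch of the expected TOML keys (assumed, not shown in the diff):
//
//   well_known_client = "https://matrix.example.com"
//   well_known_server = "matrix.example.com:443"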
|
||||
|
||||
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];
|
||||
|
||||
impl Config {
|
||||
|
@ -112,9 +123,35 @@ impl Config {
|
|||
}
|
||||
}
|
||||
|
||||
impl Config {
    pub fn well_known_client(&self) -> String {
        if let Some(url) = &self.well_known.client {
            url.to_string()
        } else {
            format!("https://{}", self.server_name)
        }
    }

    pub fn well_known_server(&self) -> OwnedServerName {
        match &self.well_known.server {
            Some(server_name) => server_name.to_owned(),
            None => {
                if self.server_name.port().is_some() {
                    self.server_name.to_owned()
                } else {
                    format!("{}:443", self.server_name.host())
                        .try_into()
                        .expect("Host from valid hostname + :443 must be valid")
                }
            }
        }
    }
}
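A sketch of the resulting fallbacks, assuming a config with `server_name = "example.com"` and no explicit well-known settings (values illustrative):

// config.well_known_client() -> "https://example.com"
// config.well_known_server() -> "example.com:443"
//
// If server_name already includes a port (e.g. "example.com:8448"),
// well_known_server() returns it unchanged.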
|
||||
|
||||
impl fmt::Display for Config {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
// Prepare a list of config values to show
|
||||
let well_known_server = self.well_known_server();
|
||||
let lines = [
|
||||
("Server name", self.server_name.host()),
|
||||
("Database backend", &self.database_backend),
|
||||
|
@ -195,6 +232,8 @@ impl fmt::Display for Config {
|
|||
}
|
||||
&lst.join(", ")
|
||||
}),
|
||||
("Well-known server name", well_known_server.as_str()),
|
||||
("Well-known client URL", &self.well_known_client()),
|
||||
];
|
||||
|
||||
let mut msg: String = "Active config values:\n\n".to_owned();
|
||||
|
@ -223,10 +262,6 @@ fn default_port() -> u16 {
|
|||
8000
|
||||
}
|
||||
|
||||
fn default_database_backend() -> String {
|
||||
"sqlite".to_owned()
|
||||
}
|
||||
|
||||
fn default_db_cache_capacity_mb() -> f64 {
|
||||
300.0
|
||||
}
|
||||
|
@ -271,7 +306,11 @@ fn default_turn_ttl() -> u64 {
|
|||
60 * 60 * 24
|
||||
}
|
||||
|
||||
fn default_openid_token_ttl() -> u64 {
|
||||
60 * 60
|
||||
}
|
||||
|
||||
// I know, it's a great name
|
||||
pub fn default_default_room_version() -> RoomVersionId {
|
||||
RoomVersionId::V9
|
||||
RoomVersionId::V10
|
||||
}
|
||||
|
|
|
@ -38,7 +38,6 @@ pub trait KeyValueDatabaseEngine: Send + Sync {
|
|||
fn memory_usage(&self) -> Result<String> {
|
||||
Ok("Current database engine does not support memory usage reporting.".to_owned())
|
||||
}
|
||||
fn clear_caches(&self) {}
|
||||
}
|
||||
|
||||
pub trait KvTree: Send + Sync {
|
||||
|
|
|
@ -126,8 +126,6 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
|
|||
self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0,
|
||||
))
|
||||
}
|
||||
|
||||
fn clear_caches(&self) {}
|
||||
}
|
||||
|
||||
impl RocksDbEngineTree<'_> {
|
||||
|
|
|
@ -13,8 +13,8 @@ use thread_local::ThreadLocal;
|
|||
use tracing::debug;
|
||||
|
||||
thread_local! {
|
||||
static READ_CONNECTION: RefCell<Option<&'static Connection>> = RefCell::new(None);
|
||||
static READ_CONNECTION_ITERATOR: RefCell<Option<&'static Connection>> = RefCell::new(None);
|
||||
static READ_CONNECTION: RefCell<Option<&'static Connection>> = const { RefCell::new(None) };
|
||||
static READ_CONNECTION_ITERATOR: RefCell<Option<&'static Connection>> = const { RefCell::new(None) };
|
||||
}
|
||||
|
||||
struct PreparedStatementIterator<'a> {
|
||||
|
|
|
@ -20,7 +20,7 @@ impl Watchers {
|
|||
let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) {
|
||||
hash_map::Entry::Occupied(o) => o.get().1.clone(),
|
||||
hash_map::Entry::Vacant(v) => {
|
||||
let (tx, rx) = tokio::sync::watch::channel(());
|
||||
let (tx, rx) = watch::channel(());
|
||||
v.insert((tx, rx.clone()));
|
||||
rx
|
||||
}
|
||||
|
|
|
@ -1,15 +1,19 @@
|
|||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use futures_util::{stream::FuturesUnordered, StreamExt};
|
||||
use lru_cache::LruCache;
|
||||
use ruma::{
|
||||
api::federation::discovery::{ServerSigningKeys, VerifyKey},
|
||||
api::federation::discovery::{OldVerifyKey, ServerSigningKeys},
|
||||
signatures::Ed25519KeyPair,
|
||||
DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
|
||||
DeviceId, ServerName, UserId,
|
||||
};
|
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||
use crate::{
|
||||
database::KeyValueDatabase,
|
||||
service::{self, globals::SigningKeys},
|
||||
services, utils, Error, Result,
|
||||
};
|
||||
|
||||
pub const COUNTER: &[u8] = b"c";
|
||||
pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";
|
||||
|
@ -237,64 +241,97 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
|
|||
self.global.remove(b"keypair")
|
||||
}
|
||||
|
||||
fn add_signing_key(
|
||||
fn add_signing_key_from_trusted_server(
|
||||
&self,
|
||||
origin: &ServerName,
|
||||
new_keys: ServerSigningKeys,
|
||||
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
|
||||
// Not atomic, but this is not critical
|
||||
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
|
||||
) -> Result<SigningKeys> {
|
||||
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
|
||||
|
||||
let mut keys = signingkeys
|
||||
.and_then(|keys| serde_json::from_slice(&keys).ok())
|
||||
.unwrap_or_else(|| {
|
||||
// Just insert "now", it doesn't matter
|
||||
ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
|
||||
});
|
||||
Ok(
|
||||
if let Some(mut prev_keys) =
|
||||
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
|
||||
{
|
||||
let ServerSigningKeys {
|
||||
verify_keys,
|
||||
old_verify_keys,
|
||||
..
|
||||
} = new_keys;
|
||||
|
||||
let ServerSigningKeys {
|
||||
verify_keys,
|
||||
old_verify_keys,
|
||||
..
|
||||
} = new_keys;
|
||||
prev_keys.verify_keys.extend(verify_keys);
|
||||
prev_keys.old_verify_keys.extend(old_verify_keys);
|
||||
prev_keys.valid_until_ts = new_keys.valid_until_ts;
|
||||
|
||||
keys.verify_keys.extend(verify_keys);
|
||||
keys.old_verify_keys.extend(old_verify_keys);
|
||||
self.server_signingkeys.insert(
|
||||
origin.as_bytes(),
|
||||
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
|
||||
)?;
|
||||
|
||||
self.server_signingkeys.insert(
|
||||
origin.as_bytes(),
|
||||
&serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
|
||||
)?;
|
||||
prev_keys.into()
|
||||
} else {
|
||||
self.server_signingkeys.insert(
|
||||
origin.as_bytes(),
|
||||
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
|
||||
)?;
|
||||
|
||||
let mut tree = keys.verify_keys;
|
||||
tree.extend(
|
||||
keys.old_verify_keys
|
||||
.into_iter()
|
||||
.map(|old| (old.0, VerifyKey::new(old.1.key))),
|
||||
);
|
||||
new_keys.into()
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
Ok(tree)
|
||||
fn add_signing_key_from_origin(
|
||||
&self,
|
||||
origin: &ServerName,
|
||||
new_keys: ServerSigningKeys,
|
||||
) -> Result<SigningKeys> {
|
||||
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
|
||||
|
||||
Ok(
|
||||
if let Some(mut prev_keys) =
|
||||
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
|
||||
{
|
||||
let ServerSigningKeys {
|
||||
verify_keys,
|
||||
old_verify_keys,
|
||||
..
|
||||
} = new_keys;
|
||||
|
||||
// Moving `verify_keys` no longer present to `old_verify_keys`
|
||||
for (key_id, key) in prev_keys.verify_keys {
|
||||
if !verify_keys.contains_key(&key_id) {
|
||||
prev_keys
|
||||
.old_verify_keys
|
||||
.insert(key_id, OldVerifyKey::new(prev_keys.valid_until_ts, key.key));
|
||||
}
|
||||
}
|
||||
|
||||
prev_keys.verify_keys = verify_keys;
|
||||
prev_keys.old_verify_keys.extend(old_verify_keys);
|
||||
prev_keys.valid_until_ts = new_keys.valid_until_ts;
|
||||
|
||||
self.server_signingkeys.insert(
|
||||
origin.as_bytes(),
|
||||
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
|
||||
)?;
|
||||
|
||||
prev_keys.into()
|
||||
} else {
|
||||
self.server_signingkeys.insert(
|
||||
origin.as_bytes(),
|
||||
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
|
||||
)?;
|
||||
|
||||
new_keys.into()
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
|
||||
fn signing_keys_for(
|
||||
&self,
|
||||
origin: &ServerName,
|
||||
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
|
||||
fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> {
|
||||
let signingkeys = self
|
||||
.server_signingkeys
|
||||
.get(origin.as_bytes())?
|
||||
.and_then(|bytes| serde_json::from_slice(&bytes).ok())
|
||||
.map(|keys: ServerSigningKeys| {
|
||||
let mut tree = keys.verify_keys;
|
||||
tree.extend(
|
||||
keys.old_verify_keys
|
||||
.into_iter()
|
||||
.map(|old| (old.0, VerifyKey::new(old.1.key))),
|
||||
);
|
||||
tree
|
||||
})
|
||||
.unwrap_or_else(BTreeMap::new);
|
||||
.and_then(|bytes| serde_json::from_slice::<SigningKeys>(&bytes).ok());
|
||||
|
||||
Ok(signingkeys)
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use ruma::api::client::error::ErrorKind;
|
||||
use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition};
|
||||
|
||||
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
||||
|
||||
|
@ -8,7 +8,7 @@ impl service::media::Data for KeyValueDatabase {
|
|||
mxc: String,
|
||||
width: u32,
|
||||
height: u32,
|
||||
content_disposition: Option<&str>,
|
||||
content_disposition: &ContentDisposition,
|
||||
content_type: Option<&str>,
|
||||
) -> Result<Vec<u8>> {
|
||||
let mut key = mxc.as_bytes().to_vec();
|
||||
|
@ -16,12 +16,7 @@ impl service::media::Data for KeyValueDatabase {
|
|||
key.extend_from_slice(&width.to_be_bytes());
|
||||
key.extend_from_slice(&height.to_be_bytes());
|
||||
key.push(0xff);
|
||||
key.extend_from_slice(
|
||||
content_disposition
|
||||
.as_ref()
|
||||
.map(|f| f.as_bytes())
|
||||
.unwrap_or_default(),
|
||||
);
|
||||
key.extend_from_slice(content_disposition.to_string().as_bytes());
|
||||
key.push(0xff);
|
||||
key.extend_from_slice(
|
||||
content_type
|
||||
|
@ -40,7 +35,7 @@ impl service::media::Data for KeyValueDatabase {
|
|||
mxc: String,
|
||||
width: u32,
|
||||
height: u32,
|
||||
) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
|
||||
) -> Result<(ContentDisposition, Option<String>, Vec<u8>)> {
|
||||
let mut prefix = mxc.as_bytes().to_vec();
|
||||
prefix.push(0xff);
|
||||
prefix.extend_from_slice(&width.to_be_bytes());
|
||||
|
@ -68,15 +63,9 @@ impl service::media::Data for KeyValueDatabase {
|
|||
.next()
|
||||
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
|
||||
|
||||
let content_disposition = if content_disposition_bytes.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
|
||||
Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
|
||||
})?,
|
||||
)
|
||||
};
|
||||
let content_disposition = content_disposition_bytes.try_into().unwrap_or_else(|_| {
|
||||
ContentDisposition::new(ruma::http_headers::ContentDispositionType::Inline)
|
||||
});
|
||||
Ok((content_disposition, content_type, key))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,9 +1,15 @@
|
|||
use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
|
||||
use ruma::{
|
||||
api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId,
|
||||
UserId,
|
||||
};
|
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||
|
||||
impl service::rooms::alias::Data for KeyValueDatabase {
|
||||
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
|
||||
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> {
|
||||
// Comes first as we don't want a stuck alias
|
||||
self.alias_userid
|
||||
.insert(alias.alias().as_bytes(), user_id.as_bytes())?;
|
||||
self.alias_roomid
|
||||
.insert(alias.alias().as_bytes(), room_id.as_bytes())?;
|
||||
let mut aliasid = room_id.as_bytes().to_vec();
|
||||
|
@ -22,13 +28,13 @@ impl service::rooms::alias::Data for KeyValueDatabase {
|
|||
self.aliasid_alias.remove(&key)?;
|
||||
}
|
||||
self.alias_roomid.remove(alias.alias().as_bytes())?;
|
||||
self.alias_userid.remove(alias.alias().as_bytes())
|
||||
} else {
|
||||
return Err(Error::BadRequest(
|
||||
Err(Error::BadRequest(
|
||||
ErrorKind::NotFound,
|
||||
"Alias does not exist.",
|
||||
));
|
||||
))
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
|
||||
|
@ -57,4 +63,16 @@ impl service::rooms::alias::Data for KeyValueDatabase {
|
|||
.map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
|
||||
}))
|
||||
}
|
||||
|
||||
fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>> {
|
||||
self.alias_userid
|
||||
.get(alias.alias().as_bytes())?
|
||||
.map(|bytes| {
|
||||
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
|
||||
Error::bad_database("User ID in alias_userid is invalid unicode.")
|
||||
})?)
|
||||
.map_err(|_| Error::bad_database("User ID in alias_roomid is invalid."))
|
||||
})
|
||||
.transpose()
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -2,24 +2,46 @@ use ruma::RoomId;

use crate::{database::KeyValueDatabase, service, services, utils, Result};

/// Splits a string into tokens used as keys in the search inverted index
///
/// This may be used to tokenize both message bodies (for indexing) and search
/// queries (for querying).
fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
}
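A quick illustration of the tokenizer, written as a test that is assumed to sit in the same module as `tokenize`:

#[cfg(test)]
mod tokenize_tests {
    use super::tokenize;

    #[test]
    fn splits_on_non_alphanumerics_and_lowercases() {
        let tokens: Vec<String> = tokenize("Hello, WORLD! Rust_2024").collect();
        // Non-alphanumeric characters split words, everything is lowercased,
        // and words longer than 50 characters would be dropped entirely.
        assert_eq!(tokens, vec!["hello", "world", "rust", "2024"]);
    }
}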
|
||||
|
||||
impl service::rooms::search::Data for KeyValueDatabase {
|
||||
fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
|
||||
let mut batch = message_body
|
||||
.split_terminator(|c: char| !c.is_alphanumeric())
|
||||
.filter(|s| !s.is_empty())
|
||||
.filter(|word| word.len() <= 50)
|
||||
.map(str::to_lowercase)
|
||||
.map(|word| {
|
||||
let mut key = shortroomid.to_be_bytes().to_vec();
|
||||
key.extend_from_slice(word.as_bytes());
|
||||
key.push(0xff);
|
||||
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
|
||||
(key, Vec::new())
|
||||
});
|
||||
let mut batch = tokenize(message_body).map(|word| {
|
||||
let mut key = shortroomid.to_be_bytes().to_vec();
|
||||
key.extend_from_slice(word.as_bytes());
|
||||
key.push(0xff);
|
||||
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
|
||||
(key, Vec::new())
|
||||
});
|
||||
|
||||
self.tokenids.insert_batch(&mut batch)
|
||||
}
|
||||
|
||||
fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
|
||||
let batch = tokenize(message_body).map(|word| {
|
||||
let mut key = shortroomid.to_be_bytes().to_vec();
|
||||
key.extend_from_slice(word.as_bytes());
|
||||
key.push(0xFF);
|
||||
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
|
||||
key
|
||||
});
|
||||
|
||||
for token in batch {
|
||||
self.tokenids.remove(&token)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn search_pdus<'a>(
|
||||
&'a self,
|
||||
room_id: &RoomId,
|
||||
|
@ -33,11 +55,7 @@ impl service::rooms::search::Data for KeyValueDatabase {
|
|||
.to_be_bytes()
|
||||
.to_vec();
|
||||
|
||||
let words: Vec<_> = search_string
|
||||
.split_terminator(|c: char| !c.is_alphanumeric())
|
||||
.filter(|s| !s.is_empty())
|
||||
.map(str::to_lowercase)
|
||||
.collect();
|
||||
let words: Vec<_> = tokenize(search_string).collect();
|
||||
|
||||
let iterators = words.clone().into_iter().map(move |word| {
|
||||
let mut prefix2 = prefix.clone();
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
use std::{collections::HashSet, sync::Arc};
|
||||
|
||||
use itertools::Itertools;
|
||||
use ruma::{
|
||||
events::{AnyStrippedStateEvent, AnySyncStateEvent},
|
||||
serde::Raw,
|
||||
|
@ -22,11 +21,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
|||
}
|
||||
|
||||
fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
||||
let roomid = room_id.as_bytes().to_vec();
|
||||
let mut roomid_prefix = room_id.as_bytes().to_vec();
|
||||
roomid_prefix.push(0xff);
|
||||
|
||||
let mut roomuser_id = roomid_prefix.clone();
|
||||
let mut roomuser_id = room_id.as_bytes().to_vec();
|
||||
roomuser_id.push(0xff);
|
||||
roomuser_id.extend_from_slice(user_id.as_bytes());
|
||||
|
||||
|
@ -41,16 +36,6 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
|||
self.userroomid_leftstate.remove(&userroom_id)?;
|
||||
self.roomuserid_leftcount.remove(&roomuser_id)?;
|
||||
|
||||
if self.roomuserid_joined.scan_prefix(roomid_prefix).count() == 0
|
||||
&& self
|
||||
.roomuserid_invitecount
|
||||
.scan_prefix(roomid_prefix)
|
||||
.count()
|
||||
== 0
|
||||
{
|
||||
self.roomid_inviteviaservers.remove(&roomid)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -59,7 +44,6 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
|||
user_id: &UserId,
|
||||
room_id: &RoomId,
|
||||
last_state: Option<Vec<Raw<AnyStrippedStateEvent>>>,
|
||||
invite_via: Option<Vec<OwnedServerName>>,
|
||||
) -> Result<()> {
|
||||
let mut roomuser_id = room_id.as_bytes().to_vec();
|
||||
roomuser_id.push(0xff);
|
||||
|
@ -83,30 +67,12 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
|||
self.userroomid_leftstate.remove(&userroom_id)?;
|
||||
self.roomuserid_leftcount.remove(&roomuser_id)?;
|
||||
|
||||
if let Some(servers) = invite_via {
|
||||
let mut prev_servers = self.servers_invite_via(room_id)?.unwrap_or(Vec::new());
|
||||
prev_servers.append(&mut servers);
|
||||
let servers = prev_servers.iter().rev().unique().rev().collect_vec();
|
||||
|
||||
let servers = servers
|
||||
.iter()
|
||||
.map(|server| server.as_bytes())
|
||||
.collect_vec()
|
||||
.join(&[0xff][..]);
|
||||
|
||||
self.roomid_inviteviaservers
|
||||
.insert(&room_id.as_bytes().to_vec(), &servers)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
||||
let roomid = room_id.as_bytes().to_vec();
|
||||
let mut roomid_prefix = room_id.as_bytes().to_vec();
|
||||
roomid_prefix.push(0xff);
|
||||
|
||||
let mut roomuser_id = roomid_prefix.clone();
|
||||
let mut roomuser_id = room_id.as_bytes().to_vec();
|
||||
roomuser_id.push(0xff);
|
||||
roomuser_id.extend_from_slice(user_id.as_bytes());
|
||||
|
||||
let mut userroom_id = user_id.as_bytes().to_vec();
|
||||
|
@ -126,16 +92,6 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
|||
self.userroomid_invitestate.remove(&userroom_id)?;
|
||||
self.roomuserid_invitecount.remove(&roomuser_id)?;
|
||||
|
||||
if self.roomuserid_joined.scan_prefix(roomid_prefix).count() == 0
|
||||
&& self
|
||||
.roomuserid_invitecount
|
||||
.scan_prefix(roomid_prefix)
|
||||
.count()
|
||||
== 0
|
||||
{
|
||||
self.roomid_inviteviaservers.remove(&roomid)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -648,41 +604,4 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
|||
|
||||
Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some())
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
fn servers_invite_via(&self, room_id: &RoomId) -> Result<Option<Vec<OwnedServerName>>> {
|
||||
let room_id = room_id.as_bytes().to_vec();
|
||||
|
||||
self.roomid_inviteviaservers
|
||||
.get(&room_id)?
|
||||
.map(|servers| {
|
||||
let state = serde_json::from_slice(&servers)
|
||||
.map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?;
|
||||
|
||||
Ok(state)
|
||||
})
|
||||
.transpose()
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
fn add_servers_invite_via(
|
||||
&self,
|
||||
room_id: &RoomId,
|
||||
servers: &Vec<OwnedServerName>,
|
||||
) -> Result<()> {
|
||||
let mut prev_servers = self.servers_invite_via(room_id)?.unwrap_or(Vec::new());
|
||||
prev_servers.append(&mut servers);
|
||||
let servers = prev_servers.iter().rev().unique().rev().collect_vec();
|
||||
|
||||
let servers = servers
|
||||
.iter()
|
||||
.map(|server| server.as_bytes())
|
||||
.collect_vec()
|
||||
.join(&[0xff][..]);
|
||||
|
||||
self.roomid_inviteviaservers
|
||||
.insert(&room_id.as_bytes().to_vec(), &servers)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ impl service::uiaa::Data for KeyValueDatabase {
|
|||
.userdevicesessionid_uiaainfo
|
||||
.get(&userdevicesessionid)?
|
||||
.ok_or(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"UIAA session does not exist.",
|
||||
))?,
|
||||
)
|
||||
|
|
|
@ -11,6 +11,7 @@ use ruma::{
|
|||
use tracing::warn;
|
||||
|
||||
use crate::{
|
||||
api::client_server::TOKEN_LENGTH,
|
||||
database::KeyValueDatabase,
|
||||
service::{self, users::clean_signatures},
|
||||
services, utils, Error, Result,
|
||||
|
@ -943,6 +944,52 @@ impl service::users::Data for KeyValueDatabase {
|
|||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
    // Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
    fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> {
        let token = utils::random_string(TOKEN_LENGTH);

        let expires_in = services().globals.config.openid_token_ttl;
        let expires_at = utils::millis_since_unix_epoch()
            .checked_add(expires_in * 1000)
            .expect("time is valid");

        let mut value = expires_at.to_be_bytes().to_vec();
        value.extend_from_slice(user_id.as_bytes());

        self.openidtoken_expiresatuserid
            .insert(token.as_bytes(), value.as_slice())?;

        Ok((token, expires_in))
    }

    /// Find out which user an OpenID access token belongs to.
    fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>> {
        let Some(value) = self.openidtoken_expiresatuserid.get(token.as_bytes())? else {
            return Ok(None);
        };
        let (expires_at_bytes, user_bytes) = value.split_at(0u64.to_be_bytes().len());

        let expires_at = u64::from_be_bytes(
            expires_at_bytes
                .try_into()
                .map_err(|_| Error::bad_database("expires_at in openid_userid is invalid u64."))?,
        );

        if expires_at < utils::millis_since_unix_epoch() {
            self.openidtoken_expiresatuserid.remove(token.as_bytes())?;

            return Ok(None);
        }

        Some(
            UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| {
                Error::bad_database("User ID in openid_userid is invalid unicode.")
            })?)
            .map_err(|_| Error::bad_database("User ID in openid_userid is invalid.")),
        )
        .transpose()
    }
}
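The two OpenID functions above share one value layout: an 8-byte big-endian expiry timestamp followed by the raw user ID bytes. A standalone sketch of that encoding (hypothetical helpers, not part of the diff):

fn encode_openid_value(expires_at: u64, user_id: &str) -> Vec<u8> {
    let mut value = expires_at.to_be_bytes().to_vec(); // 8 bytes, big-endian millis
    value.extend_from_slice(user_id.as_bytes());       // e.g. "@alice:example.com"
    value
}

fn decode_openid_value(value: &[u8]) -> Option<(u64, &str)> {
    if value.len() < std::mem::size_of::<u64>() {
        return None;
    }
    let (expires_at, user) = value.split_at(std::mem::size_of::<u64>());
    Some((
        u64::from_be_bytes(expires_at.try_into().ok()?),
        std::str::from_utf8(user).ok()?,
    ))
}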
|
||||
|
||||
impl KeyValueDatabase {}
|
||||
|
|
|
@ -6,6 +6,7 @@ use crate::{
|
|||
SERVICES,
|
||||
};
|
||||
use abstraction::{KeyValueDatabaseEngine, KvTree};
|
||||
use base64::{engine::general_purpose, Engine};
|
||||
use directories::ProjectDirs;
|
||||
use lru_cache::LruCache;
|
||||
|
||||
|
@ -57,6 +58,7 @@ pub struct KeyValueDatabase {
|
|||
pub(super) userid_masterkeyid: Arc<dyn KvTree>,
|
||||
pub(super) userid_selfsigningkeyid: Arc<dyn KvTree>,
|
||||
pub(super) userid_usersigningkeyid: Arc<dyn KvTree>,
|
||||
pub(super) openidtoken_expiresatuserid: Arc<dyn KvTree>, // expiresatuserid = expiresat + userid
|
||||
|
||||
pub(super) userfilterid_filter: Arc<dyn KvTree>, // UserFilterId = UserId + FilterId
|
||||
|
||||
|
@ -100,6 +102,8 @@ pub struct KeyValueDatabase {
|
|||
pub(super) userroomid_leftstate: Arc<dyn KvTree>,
|
||||
pub(super) roomuserid_leftcount: Arc<dyn KvTree>,
|
||||
|
||||
pub(super) alias_userid: Arc<dyn KvTree>, // User who created the alias
|
||||
|
||||
pub(super) disabledroomids: Arc<dyn KvTree>, // Rooms where incoming federation handling is disabled
|
||||
|
||||
pub(super) lazyloadedids: Arc<dyn KvTree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId
|
||||
|
@ -161,8 +165,6 @@ pub struct KeyValueDatabase {
|
|||
//pub pusher: pusher::PushData,
|
||||
pub(super) senderkey_pusher: Arc<dyn KvTree>,
|
||||
|
||||
pub(super) roomid_inviteviaservers: Arc<dyn KvTree>,
|
||||
|
||||
pub(super) pdu_cache: Mutex<LruCache<OwnedEventId, Arc<PduEvent>>>,
|
||||
pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
|
||||
pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,
|
||||
|
@ -239,7 +241,7 @@ impl KeyValueDatabase {
|
|||
Self::check_db_setup(&config)?;
|
||||
|
||||
if !Path::new(&config.database_path).exists() {
|
||||
std::fs::create_dir_all(&config.database_path)
|
||||
fs::create_dir_all(&config.database_path)
|
||||
.map_err(|_| Error::BadConfig("Database folder doesn't exist and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?;
|
||||
}
|
||||
|
||||
|
@ -292,6 +294,7 @@ impl KeyValueDatabase {
|
|||
userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
|
||||
userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
|
||||
userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
|
||||
openidtoken_expiresatuserid: builder.open_tree("openidtoken_expiresatuserid")?,
|
||||
userfilterid_filter: builder.open_tree("userfilterid_filter")?,
|
||||
todeviceid_events: builder.open_tree("todeviceid_events")?,
|
||||
|
||||
|
@ -327,6 +330,8 @@ impl KeyValueDatabase {
|
|||
userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
|
||||
roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
|
||||
|
||||
alias_userid: builder.open_tree("alias_userid")?,
|
||||
|
||||
disabledroomids: builder.open_tree("disabledroomids")?,
|
||||
|
||||
lazyloadedids: builder.open_tree("lazyloadedids")?,
|
||||
|
@ -370,8 +375,6 @@ impl KeyValueDatabase {
|
|||
global: builder.open_tree("global")?,
|
||||
server_signingkeys: builder.open_tree("server_signingkeys")?,
|
||||
|
||||
roomid_inviteviaservers: builder.open_tree("roomid_inviteviaservers")?,
|
||||
|
||||
pdu_cache: Mutex::new(LruCache::new(
|
||||
config
|
||||
.pdu_cache_capacity
|
||||
|
@ -408,11 +411,9 @@ impl KeyValueDatabase {
|
|||
// Matrix resource ownership is based on the server name; changing it
|
||||
// requires recreating the database from scratch.
|
||||
if services().users.count()? > 0 {
|
||||
let conduit_user =
|
||||
UserId::parse_with_server_name("conduit", services().globals.server_name())
|
||||
.expect("@conduit:server_name is valid");
|
||||
let conduit_user = services().globals.server_user();
|
||||
|
||||
if !services().users.exists(&conduit_user)? {
|
||||
if !services().users.exists(conduit_user)? {
|
||||
error!(
|
||||
"The {} server user does not exist, and the database is not new.",
|
||||
conduit_user
|
||||
|
@ -424,7 +425,7 @@ impl KeyValueDatabase {
|
|||
}
|
||||
|
||||
// If the database has any data, perform data migrations before starting
|
||||
let latest_database_version = 13;
|
||||
let latest_database_version = 16;
|
||||
|
||||
if services().users.count()? > 0 {
|
||||
// MIGRATIONS
|
||||
|
@ -850,7 +851,7 @@ impl KeyValueDatabase {
|
|||
let rule = rules_list.content.get(content_rule_transformation[0]);
|
||||
if rule.is_some() {
|
||||
let mut rule = rule.unwrap().clone();
|
||||
rule.rule_id = content_rule_transformation[1].to_owned();
|
||||
content_rule_transformation[1].clone_into(&mut rule.rule_id);
|
||||
rules_list
|
||||
.content
|
||||
.shift_remove(content_rule_transformation[0]);
|
||||
|
@ -875,7 +876,7 @@ impl KeyValueDatabase {
|
|||
let rule = rules_list.underride.get(transformation[0]);
|
||||
if let Some(rule) = rule {
|
||||
let mut rule = rule.clone();
|
||||
rule.rule_id = transformation[1].to_owned();
|
||||
transformation[1].clone_into(&mut rule.rule_id);
|
||||
rules_list.underride.shift_remove(transformation[0]);
|
||||
rules_list.underride.insert(rule);
|
||||
}
|
||||
|
@ -922,7 +923,7 @@ impl KeyValueDatabase {
|
|||
let mut account_data =
|
||||
serde_json::from_str::<PushRulesEvent>(raw_rules_list.get()).unwrap();
|
||||
|
||||
let user_default_rules = ruma::push::Ruleset::server_default(&user);
|
||||
let user_default_rules = Ruleset::server_default(&user);
|
||||
account_data
|
||||
.content
|
||||
.global
|
||||
|
@ -941,6 +942,86 @@ impl KeyValueDatabase {
|
|||
warn!("Migration: 12 -> 13 finished");
|
||||
}
|
||||
|
||||
if services().globals.database_version()? < 16 {
|
||||
// Reconstruct all media using the filesystem
|
||||
db.mediaid_file.clear().unwrap();
|
||||
|
||||
for file in fs::read_dir(services().globals.get_media_folder()).unwrap() {
|
||||
let file = file.unwrap();
|
||||
let mediaid = general_purpose::URL_SAFE_NO_PAD
|
||||
.decode(file.file_name().into_string().unwrap())
|
||||
.unwrap();
|
||||
|
||||
let mut parts = mediaid.rsplit(|&b| b == 0xff);
|
||||
|
||||
let mut removed_bytes = 0;
|
||||
|
||||
let content_type_bytes = parts.next().unwrap();
|
||||
removed_bytes += content_type_bytes.len() + 1;
|
||||
|
||||
let content_disposition_bytes = parts
|
||||
.next()
|
||||
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
|
||||
removed_bytes += content_disposition_bytes.len();
|
||||
|
||||
let mut content_disposition =
|
||||
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
|
||||
Error::bad_database("Content Disposition in mediaid_file is invalid.")
|
||||
})?;
|
||||
|
||||
if content_disposition.contains("filename=")
|
||||
&& !content_disposition.contains("filename=\"")
|
||||
{
|
||||
println!("{}", &content_disposition);
|
||||
content_disposition =
|
||||
content_disposition.replacen("filename=", "filename=\"", 1);
|
||||
content_disposition.push('"');
|
||||
println!("{}", &content_disposition);
|
||||
|
||||
let mut new_key = mediaid[..(mediaid.len() - removed_bytes)].to_vec();
|
||||
assert!(*new_key.last().unwrap() == 0xff);
|
||||
|
||||
let mut shorter_key = new_key.clone();
|
||||
shorter_key.extend(
|
||||
ruma::http_headers::ContentDisposition::new(
|
||||
ruma::http_headers::ContentDispositionType::Inline,
|
||||
)
|
||||
.to_string()
|
||||
.as_bytes(),
|
||||
);
|
||||
shorter_key.push(0xff);
|
||||
shorter_key.extend_from_slice(content_type_bytes);
|
||||
|
||||
new_key.extend_from_slice(content_disposition.to_string().as_bytes());
|
||||
new_key.push(0xff);
|
||||
new_key.extend_from_slice(content_type_bytes);
|
||||
|
||||
// Some file names are too long. Ignore those.
|
||||
match fs::rename(
|
||||
services().globals.get_media_file(&mediaid),
|
||||
services().globals.get_media_file(&new_key),
|
||||
) {
|
||||
Ok(_) => {
|
||||
db.mediaid_file.insert(&new_key, &[])?;
|
||||
}
|
||||
Err(_) => {
|
||||
fs::rename(
|
||||
services().globals.get_media_file(&mediaid),
|
||||
services().globals.get_media_file(&shorter_key),
|
||||
)
|
||||
.unwrap();
|
||||
db.mediaid_file.insert(&shorter_key, &[])?;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
db.mediaid_file.insert(&mediaid, &[])?;
|
||||
}
|
||||
}
|
||||
services().globals.bump_database_version(16)?;
|
||||
|
||||
warn!("Migration: 13 -> 16 finished");
|
||||
}
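The migration's Content-Disposition rewrite boils down to quoting a bare filename parameter. A standalone sketch of the same transformation (the function name is invented):

fn quote_filename(mut content_disposition: String) -> String {
    // e.g. `inline; filename=cat.png` becomes `inline; filename="cat.png"`;
    // already-quoted values are left untouched.
    if content_disposition.contains("filename=") && !content_disposition.contains("filename=\"") {
        content_disposition = content_disposition.replacen("filename=", "filename=\"", 1);
        content_disposition.push('"');
    }
    content_disposition
}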
|
||||
|
||||
assert_eq!(
|
||||
services().globals.database_version().unwrap(),
|
||||
latest_database_version
|
||||
|
@ -1106,22 +1187,21 @@ impl KeyValueDatabase {
|
|||
|
||||
/// Sets the emergency password and push rules for the @conduit account in case emergency password is set
|
||||
fn set_emergency_access() -> Result<bool> {
|
||||
let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name())
|
||||
.expect("@conduit:server_name is a valid UserId");
|
||||
let conduit_user = services().globals.server_user();
|
||||
|
||||
services().users.set_password(
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
services().globals.emergency_password().as_deref(),
|
||||
)?;
|
||||
|
||||
let (ruleset, res) = match services().globals.emergency_password() {
|
||||
Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)),
|
||||
Some(_) => (Ruleset::server_default(conduit_user), Ok(true)),
|
||||
None => (Ruleset::new(), Ok(false)),
|
||||
};
|
||||
|
||||
services().account_data.update(
|
||||
None,
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||
&serde_json::to_value(&GlobalAccountDataEvent {
|
||||
content: PushRulesEventContent { global: ruleset },
|
||||
|
|
src/main.rs (133 lines changed)
|
@ -1,9 +1,11 @@
|
|||
use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};
|
||||
|
||||
use axum::{
|
||||
body::Body,
|
||||
extract::{DefaultBodyLimit, FromRequestParts, MatchedPath},
|
||||
response::IntoResponse,
|
||||
routing::{get, on, MethodFilter},
|
||||
middleware::map_response,
|
||||
response::{IntoResponse, Response},
|
||||
routing::{any, get, on, MethodFilter},
|
||||
Router,
|
||||
};
|
||||
use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle};
|
||||
|
@ -13,7 +15,7 @@ use figment::{
|
|||
Figment,
|
||||
};
|
||||
use http::{
|
||||
header::{self, HeaderName},
|
||||
header::{self, HeaderName, CONTENT_SECURITY_POLICY},
|
||||
Method, StatusCode, Uri,
|
||||
};
|
||||
use ruma::api::{
|
||||
|
@ -55,7 +57,7 @@ async fn main() {
|
|||
))
|
||||
.nested(),
|
||||
)
|
||||
.merge(Env::prefixed("CONDUIT_").global());
|
||||
.merge(Env::prefixed("CONDUIT_").global().split("__"));
|
||||
|
||||
let config = match raw_config.extract::<Config>() {
|
||||
Ok(s) => s,
|
||||
|
@ -68,11 +70,13 @@ async fn main() {
|
|||
config.warn_deprecated();
|
||||
|
||||
if config.allow_jaeger {
|
||||
opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
|
||||
let tracer = opentelemetry_jaeger::new_agent_pipeline()
|
||||
.with_auto_split_batch(true)
|
||||
.with_service_name("conduit")
|
||||
.install_batch(opentelemetry::runtime::Tokio)
|
||||
opentelemetry::global::set_text_map_propagator(
|
||||
opentelemetry_jaeger_propagator::Propagator::new(),
|
||||
);
|
||||
let tracer = opentelemetry_otlp::new_pipeline()
|
||||
.tracing()
|
||||
.with_exporter(opentelemetry_otlp::new_exporter().tonic())
|
||||
.install_batch(opentelemetry_sdk::runtime::Tokio)
|
||||
.unwrap();
|
||||
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
|
||||
|
||||
|
@ -141,6 +145,13 @@ async fn main() {
|
|||
}
|
||||
}
|
||||
|
||||
/// Adds additional headers to prevent any potential XSS attacks via the media repo
|
||||
async fn set_csp_header(response: Response) -> impl IntoResponse {
|
||||
(
|
||||
[(CONTENT_SECURITY_POLICY, "sandbox; default-src 'none'; script-src 'none'; plugin-types application/pdf; style-src 'unsafe-inline'; object-src 'self';")], response
|
||||
)
|
||||
}
|
||||
|
||||
async fn run_server() -> io::Result<()> {
|
||||
let config = &services().globals.config;
|
||||
let addr = SocketAddr::from((config.address, config.port));
|
||||
|
@ -181,6 +192,7 @@ async fn run_server() -> io::Result<()> {
|
|||
])
|
||||
.max_age(Duration::from_secs(86400)),
|
||||
)
|
||||
.layer(map_response(set_csp_header))
|
||||
.layer(DefaultBodyLimit::max(
|
||||
config
|
||||
.max_request_size
|
||||
|
@ -188,7 +200,7 @@ async fn run_server() -> io::Result<()> {
|
|||
.expect("failed to convert max request size"),
|
||||
));
|
||||
|
||||
let app = routes().layer(middlewares).into_make_service();
|
||||
let app = routes(config).layer(middlewares).into_make_service();
|
||||
let handle = ServerHandle::new();
|
||||
|
||||
tokio::spawn(shutdown_signal(handle.clone()));
|
||||
|
@ -216,10 +228,10 @@ async fn run_server() -> io::Result<()> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
async fn spawn_task<B: Send + 'static>(
|
||||
req: axum::http::Request<B>,
|
||||
next: axum::middleware::Next<B>,
|
||||
) -> std::result::Result<axum::response::Response, StatusCode> {
|
||||
async fn spawn_task(
|
||||
req: http::Request<Body>,
|
||||
next: axum::middleware::Next,
|
||||
) -> std::result::Result<Response, StatusCode> {
|
||||
if services().globals.shutdown.load(atomic::Ordering::Relaxed) {
|
||||
return Err(StatusCode::SERVICE_UNAVAILABLE);
|
||||
}
|
||||
|
@ -228,14 +240,14 @@ async fn spawn_task<B: Send + 'static>(
|
|||
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
|
||||
async fn unrecognized_method<B: Send>(
|
||||
req: axum::http::Request<B>,
|
||||
next: axum::middleware::Next<B>,
|
||||
) -> std::result::Result<axum::response::Response, StatusCode> {
|
||||
async fn unrecognized_method(
|
||||
req: http::Request<Body>,
|
||||
next: axum::middleware::Next,
|
||||
) -> std::result::Result<Response, StatusCode> {
|
||||
let method = req.method().clone();
|
||||
let uri = req.uri().clone();
|
||||
let inner = next.run(req).await;
|
||||
if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED {
|
||||
if inner.status() == StatusCode::METHOD_NOT_ALLOWED {
|
||||
warn!("Method not allowed: {method} {uri}");
|
||||
return Ok(RumaResponse(UiaaResponse::MatrixError(RumaError {
|
||||
body: ErrorBody::Standard {
|
||||
|
@ -249,8 +261,8 @@ async fn unrecognized_method<B: Send>(
|
|||
Ok(inner)
|
||||
}
|
||||
|
||||
fn routes() -> Router {
|
||||
Router::new()
|
||||
fn routes(config: &Config) -> Router {
|
||||
let router = Router::new()
|
||||
.ruma_route(client_server::get_supported_versions_route)
|
||||
.ruma_route(client_server::get_register_available_route)
|
||||
.ruma_route(client_server::register_route)
|
||||
|
@ -277,6 +289,7 @@ fn routes() -> Router {
|
|||
.ruma_route(client_server::get_room_aliases_route)
|
||||
.ruma_route(client_server::get_filter_route)
|
||||
.ruma_route(client_server::create_filter_route)
|
||||
.ruma_route(client_server::create_openid_token_route)
|
||||
.ruma_route(client_server::set_global_account_data_route)
|
||||
.ruma_route(client_server::set_room_account_data_route)
|
||||
.ruma_route(client_server::get_global_account_data_route)
|
||||
|
@ -366,10 +379,14 @@ fn routes() -> Router {
|
|||
.ruma_route(client_server::turn_server_route)
|
||||
.ruma_route(client_server::send_event_to_device_route)
|
||||
.ruma_route(client_server::get_media_config_route)
|
||||
.ruma_route(client_server::get_media_config_auth_route)
|
||||
.ruma_route(client_server::create_content_route)
|
||||
.ruma_route(client_server::get_content_route)
|
||||
.ruma_route(client_server::get_content_auth_route)
|
||||
.ruma_route(client_server::get_content_as_filename_route)
|
||||
.ruma_route(client_server::get_content_as_filename_auth_route)
|
||||
.ruma_route(client_server::get_content_thumbnail_route)
|
||||
.ruma_route(client_server::get_content_thumbnail_auth_route)
|
||||
.ruma_route(client_server::get_devices_route)
|
||||
.ruma_route(client_server::get_device_route)
|
||||
.ruma_route(client_server::update_device_route)
|
||||
|
@ -390,33 +407,7 @@ fn routes() -> Router {
|
|||
.ruma_route(client_server::get_relating_events_with_rel_type_route)
|
||||
.ruma_route(client_server::get_relating_events_route)
|
||||
.ruma_route(client_server::get_hierarchy_route)
|
||||
.ruma_route(server_server::get_server_version_route)
|
||||
.route(
|
||||
"/_matrix/key/v2/server",
|
||||
get(server_server::get_server_keys_route),
|
||||
)
|
||||
.route(
|
||||
"/_matrix/key/v2/server/:key_id",
|
||||
get(server_server::get_server_keys_deprecated_route),
|
||||
)
|
||||
.ruma_route(server_server::get_public_rooms_route)
|
||||
.ruma_route(server_server::get_public_rooms_filtered_route)
|
||||
.ruma_route(server_server::send_transaction_message_route)
|
||||
.ruma_route(server_server::get_event_route)
|
||||
.ruma_route(server_server::get_backfill_route)
|
||||
.ruma_route(server_server::get_missing_events_route)
|
||||
.ruma_route(server_server::get_event_authorization_route)
|
||||
.ruma_route(server_server::get_room_state_route)
|
||||
.ruma_route(server_server::get_room_state_ids_route)
|
||||
.ruma_route(server_server::create_join_event_template_route)
|
||||
.ruma_route(server_server::create_join_event_v1_route)
|
||||
.ruma_route(server_server::create_join_event_v2_route)
|
||||
.ruma_route(server_server::create_invite_route)
|
||||
.ruma_route(server_server::get_devices_route)
|
||||
.ruma_route(server_server::get_room_information_route)
|
||||
.ruma_route(server_server::get_profile_information_route)
|
||||
.ruma_route(server_server::get_keys_route)
|
||||
.ruma_route(server_server::claim_keys_route)
|
||||
.ruma_route(client_server::well_known_client)
|
||||
.route(
|
||||
"/_matrix/client/r0/rooms/:room_id/initialSync",
|
||||
get(initial_sync),
|
||||
|
@ -426,7 +417,47 @@ fn routes() -> Router {
|
|||
get(initial_sync),
|
||||
)
|
||||
.route("/", get(it_works))
|
||||
.fallback(not_found)
|
||||
.fallback(not_found);
|
||||
|
||||
if config.allow_federation {
|
||||
router
|
||||
.ruma_route(server_server::get_server_version_route)
|
||||
.route(
|
||||
"/_matrix/key/v2/server",
|
||||
get(server_server::get_server_keys_route),
|
||||
)
|
||||
.route(
|
||||
"/_matrix/key/v2/server/:key_id",
|
||||
get(server_server::get_server_keys_deprecated_route),
|
||||
)
|
||||
.ruma_route(server_server::get_public_rooms_route)
|
||||
.ruma_route(server_server::get_public_rooms_filtered_route)
|
||||
.ruma_route(server_server::send_transaction_message_route)
|
||||
.ruma_route(server_server::get_event_route)
|
||||
.ruma_route(server_server::get_backfill_route)
|
||||
.ruma_route(server_server::get_missing_events_route)
|
||||
.ruma_route(server_server::get_event_authorization_route)
|
||||
.ruma_route(server_server::get_room_state_route)
|
||||
.ruma_route(server_server::get_room_state_ids_route)
|
||||
.ruma_route(server_server::create_join_event_template_route)
|
||||
.ruma_route(server_server::create_join_event_v1_route)
|
||||
.ruma_route(server_server::create_join_event_v2_route)
|
||||
.ruma_route(server_server::create_invite_route)
|
||||
.ruma_route(server_server::get_devices_route)
|
||||
.ruma_route(server_server::get_content_route)
|
||||
.ruma_route(server_server::get_content_thumbnail_route)
|
||||
.ruma_route(server_server::get_room_information_route)
|
||||
.ruma_route(server_server::get_profile_information_route)
|
||||
.ruma_route(server_server::get_keys_route)
|
||||
.ruma_route(server_server::claim_keys_route)
|
||||
.ruma_route(server_server::get_openid_userinfo_route)
|
||||
.ruma_route(server_server::well_known_server)
|
||||
} else {
|
||||
router
|
||||
.route("/_matrix/federation/*path", any(federation_disabled))
|
||||
.route("/_matrix/key/*path", any(federation_disabled))
|
||||
.route("/.well-known/matrix/server", any(federation_disabled))
|
||||
}
|
||||
}
|
||||
|
||||
async fn shutdown_signal(handle: ServerHandle) {
|
||||
|
@ -463,6 +494,10 @@ async fn shutdown_signal(handle: ServerHandle) {
|
|||
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]);
|
||||
}
|
||||
|
||||
async fn federation_disabled(_: Uri) -> impl IntoResponse {
|
||||
Error::bad_config("Federation is disabled.")
|
||||
}
|
||||
|
||||
async fn not_found(uri: Uri) -> impl IntoResponse {
|
||||
warn!("Not found: {uri}");
|
||||
Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request")
|
||||
|
|
|
@ -1,9 +1,4 @@
|
|||
use std::{
|
||||
collections::BTreeMap,
|
||||
convert::{TryFrom, TryInto},
|
||||
sync::Arc,
|
||||
time::Instant,
|
||||
};
|
||||
use std::{collections::BTreeMap, convert::TryFrom, sync::Arc, time::Instant};
|
||||
|
||||
use clap::Parser;
|
||||
use regex::Regex;
|
||||
|
@ -24,7 +19,8 @@ use ruma::{
|
|||
},
|
||||
TimelineEventType,
|
||||
},
|
||||
EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
|
||||
EventId, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId,
|
||||
RoomVersionId, ServerName, UserId,
|
||||
};
|
||||
use serde_json::value::to_raw_value;
|
||||
use tokio::sync::{mpsc, Mutex, RwLock};
|
||||
|
@ -77,6 +73,12 @@ enum AdminCommand {
|
|||
/// List all rooms we are currently handling an incoming pdu from
|
||||
IncomingFederation,
|
||||
|
||||
/// Removes an alias from the server
|
||||
RemoveAlias {
|
||||
/// The alias to be removed
|
||||
alias: Box<RoomAliasId>,
|
||||
},
|
||||
|
||||
/// Deactivate a user
|
||||
///
|
||||
/// User will not be removed from all rooms by default.
|
||||
|
@ -160,24 +162,23 @@ enum AdminCommand {
|
|||
password: Option<String>,
|
||||
},
|
||||
|
||||
/// Temporarily toggle user registration by passing either true or false as an argument; this does not persist between restarts
|
||||
AllowRegistration { status: Option<bool> },
|
||||
|
||||
/// Disables incoming federation handling for a room.
|
||||
DisableRoom { room_id: Box<RoomId> },
|
||||
/// Enables incoming federation handling for a room again.
|
||||
EnableRoom { room_id: Box<RoomId> },
|
||||
|
||||
/// Verify json signatures
|
||||
/// [commandbody]()
|
||||
/// # ```
|
||||
/// # json here
|
||||
/// # ```
|
||||
/// Sign a json object using Conduit's signing keys, putting the json in a codeblock
|
||||
SignJson,
|
||||
|
||||
/// Verify json signatures
|
||||
/// [commandbody]()
|
||||
/// # ```
|
||||
/// # json here
|
||||
/// # ```
|
||||
/// Verify json signatures, putting the json in a codeblock
|
||||
VerifyJson,
|
||||
|
||||
/// Parses a JSON object as an event then creates a hash and signs it, putting a room
|
||||
/// version as an argument, and the json in a codeblock
|
||||
HashAndSignEvent { room_version_id: RoomVersionId },
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
|
@ -212,8 +213,7 @@ impl Service {
|
|||
// TODO: Use futures when we have long admin commands
|
||||
//let mut futures = FuturesUnordered::new();
|
||||
|
||||
let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
|
||||
.expect("@conduit:server_name is valid");
|
||||
let conduit_user = services().globals.server_user();
|
||||
|
||||
if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
|
||||
loop {
|
||||
|
@ -246,8 +246,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: None,
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&conduit_room,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -605,6 +606,14 @@ impl Service {
|
|||
)))
|
||||
}
|
||||
};
|
||||
|
||||
// Checks if user is local
|
||||
if user_id.server_name() != services().globals.server_name() {
|
||||
return Ok(RoomMessageEventContent::text_plain(
|
||||
"The specified user is not from this server!",
|
||||
));
|
||||
};
|
||||
|
||||
if user_id.is_historical() {
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"Userid {user_id} is not allowed due to historical"
|
||||
|
@ -652,6 +661,24 @@ impl Service {
|
|||
"Created user with user_id: {user_id} and password: {password}"
|
||||
))
|
||||
}
|
||||
AdminCommand::AllowRegistration { status } => {
|
||||
if let Some(status) = status {
|
||||
services().globals.set_registration(status).await;
|
||||
RoomMessageEventContent::text_plain(if status {
|
||||
"Registration is now enabled"
|
||||
} else {
|
||||
"Registration is now disabled"
|
||||
})
|
||||
} else {
|
||||
RoomMessageEventContent::text_plain(
|
||||
if services().globals.allow_registration().await {
|
||||
"Registration is currently enabled"
|
||||
} else {
|
||||
"Registration is currently disabled"
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
AdminCommand::DisableRoom { room_id } => {
|
||||
services().rooms.metadata.disable_room(&room_id, true)?;
|
||||
RoomMessageEventContent::text_plain("Room disabled.")
|
||||
|
@ -833,15 +860,46 @@ impl Service {
|
|||
services()
|
||||
.rooms
|
||||
.event_handler
|
||||
// Generally we shouldn't be checking against expired keys unless required, so in the admin
|
||||
// room it might be best to not allow expired keys
|
||||
.fetch_required_signing_keys(&value, &pub_key_map)
|
||||
.await?;
|
||||
|
||||
let pub_key_map = pub_key_map.read().await;
|
||||
match ruma::signatures::verify_json(&pub_key_map, &value) {
|
||||
Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
|
||||
Err(e) => RoomMessageEventContent::text_plain(format!(
|
||||
let mut expired_key_map = BTreeMap::new();
|
||||
let mut valid_key_map = BTreeMap::new();
|
||||
|
||||
for (server, keys) in pub_key_map.into_inner().into_iter() {
|
||||
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
|
||||
valid_key_map.insert(
|
||||
server,
|
||||
keys.verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id, key.key))
|
||||
.collect(),
|
||||
);
|
||||
} else {
|
||||
expired_key_map.insert(
|
||||
server,
|
||||
keys.verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id, key.key))
|
||||
.collect(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if ruma::signatures::verify_json(&valid_key_map, &value).is_ok() {
|
||||
RoomMessageEventContent::text_plain("Signature correct")
|
||||
} else if let Err(e) =
|
||||
ruma::signatures::verify_json(&expired_key_map, &value)
|
||||
{
|
||||
RoomMessageEventContent::text_plain(format!(
|
||||
"Signature verification failed: {e}"
|
||||
)),
|
||||
))
|
||||
} else {
|
||||
RoomMessageEventContent::text_plain(
|
||||
"Signature correct (with expired keys)",
|
||||
)
|
||||
}
|
||||
}
|
||||
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
|
||||
|
@ -852,6 +910,61 @@ impl Service {
|
|||
)
|
||||
}
|
||||
}
|
||||
AdminCommand::HashAndSignEvent { room_version_id } => {
|
||||
if body.len() > 2
|
||||
// Language may be specified as part of the codeblock (e.g. "```json")
|
||||
&& body[0].trim().starts_with("```")
|
||||
&& body.last().unwrap().trim() == "```"
|
||||
{
|
||||
let string = body[1..body.len() - 1].join("\n");
|
||||
match serde_json::from_str(&string) {
|
||||
Ok(mut value) => {
|
||||
if let Err(e) = ruma::signatures::hash_and_sign_event(
|
||||
services().globals.server_name().as_str(),
|
||||
services().globals.keypair(),
|
||||
&mut value,
|
||||
&room_version_id,
|
||||
) {
|
||||
RoomMessageEventContent::text_plain(format!("Invalid event: {e}"))
|
||||
} else {
|
||||
let json_text = serde_json::to_string_pretty(&value)
|
||||
.expect("canonical json is valid json");
|
||||
RoomMessageEventContent::text_plain(json_text)
|
||||
}
|
||||
}
|
||||
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
|
||||
}
|
||||
} else {
|
||||
RoomMessageEventContent::text_plain(
|
||||
"Expected code block in command body. Add --help for details.",
|
||||
)
|
||||
}
|
||||
}
|
||||
AdminCommand::RemoveAlias { alias } => {
|
||||
if alias.server_name() != services().globals.server_name() {
|
||||
RoomMessageEventContent::text_plain(
|
||||
"Cannot remove alias which is not from this server",
|
||||
)
|
||||
} else if services()
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(&alias)?
|
||||
.is_none()
|
||||
{
|
||||
RoomMessageEventContent::text_plain("No such alias exists")
|
||||
} else {
|
||||
// We execute this as the server user for two reasons
|
||||
// 1. If the user can execute commands in the admin room, they can always remove the alias.
|
||||
// 2. In the future, we are likely going to be able to allow users to execute commands via
|
||||
// other methods, such as IPC, which would lead to us not knowing their user id
|
||||
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.remove_alias(&alias, services().globals.server_user())?;
|
||||
RoomMessageEventContent::text_plain("Alias removed sucessfully")
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok(reply_message_content)
|
||||
|
@ -959,16 +1072,28 @@ impl Service {
|
|||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
// Create a user for the server
|
||||
let conduit_user =
|
||||
UserId::parse_with_server_name("conduit", services().globals.server_name())
|
||||
.expect("@conduit:server_name is valid");
|
||||
let conduit_user = services().globals.server_user();
|
||||
|
||||
services().users.create(&conduit_user, None)?;
|
||||
services().users.create(conduit_user, None)?;
|
||||
|
||||
let mut content = RoomCreateEventContent::new_v1(conduit_user.clone());
|
||||
let room_version = services().globals.default_room_version();
|
||||
let mut content = match room_version {
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
| RoomVersionId::V7
|
||||
| RoomVersionId::V8
|
||||
| RoomVersionId::V9
|
||||
| RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.to_owned()),
|
||||
RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
|
||||
_ => unreachable!("Validity of room version already checked"),
|
||||
};
|
||||
content.federate = true;
|
||||
content.predecessor = None;
|
||||
content.room_version = services().globals.default_room_version();
|
||||
content.room_version = room_version;
|
||||
|
||||
// 1. The room create event
|
||||
services()
|
||||
|
@ -981,8 +1106,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -1009,8 +1135,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some(conduit_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -1018,7 +1145,7 @@ impl Service {
|
|||
|
||||
// 3. Power levels
|
||||
let mut users = BTreeMap::new();
|
||||
users.insert(conduit_user.clone(), 100.into());
|
||||
users.insert(conduit_user.to_owned(), 100.into());
|
||||
|
||||
services()
|
||||
.rooms
|
||||
|
@ -1034,8 +1161,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -1053,8 +1181,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -1074,8 +1203,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -1095,8 +1225,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -1115,8 +1246,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -1135,17 +1267,16 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 6. Room alias
|
||||
let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
|
||||
.try_into()
|
||||
.expect("#admins:server_name is a valid alias name");
|
||||
let alias: OwnedRoomAliasId = services().globals.admin_alias().to_owned();
|
||||
|
||||
services()
|
||||
.rooms
|
||||
|
@ -1161,14 +1292,18 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
.await?;
|
||||
|
||||
services().rooms.alias.set_alias(&alias, &room_id)?;
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.set_alias(&alias, &room_id, conduit_user)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1177,15 +1312,10 @@ impl Service {
|
|||
///
|
||||
/// Errors are propagated from the database, and will have None if there is no admin room
|
||||
pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
|
||||
let admin_room_alias: Box<RoomAliasId> =
|
||||
format!("#admins:{}", services().globals.server_name())
|
||||
.try_into()
|
||||
.expect("#admins:server_name is a valid alias name");
|
||||
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(&admin_room_alias)
|
||||
.resolve_local_alias(services().globals.admin_alias())
|
||||
}
|
||||
|
||||
/// Invite the user to the conduit admin room.
|
||||
|
@ -1209,9 +1339,7 @@ impl Service {
|
|||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
// Use the server user to grant the new admin's power level
|
||||
let conduit_user =
|
||||
UserId::parse_with_server_name("conduit", services().globals.server_name())
|
||||
.expect("@conduit:server_name is valid");
|
||||
let conduit_user = services().globals.server_user();
|
||||
|
||||
// Invite and join the real user
|
||||
services()
|
||||
|
@ -1234,8 +1362,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -1260,6 +1389,7 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
user_id,
|
||||
&room_id,
|
||||
|
@ -1286,8 +1416,9 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)
|
||||
|
@ -1305,14 +1436,24 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: None,
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Checks whether a given user is an admin of this server
|
||||
pub fn user_is_admin(&self, user_id: &UserId) -> Result<bool> {
|
||||
let Some(admin_room) = self.get_admin_room()? else {
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
services().rooms.state_cache.is_joined(user_id, &admin_room)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
|
|
@ -6,7 +6,10 @@ pub use data::Data;
|
|||
|
||||
use futures_util::Future;
|
||||
use regex::RegexSet;
|
||||
use ruma::api::appservice::{Namespace, Registration};
|
||||
use ruma::{
|
||||
api::appservice::{Namespace, Registration},
|
||||
RoomAliasId, RoomId, UserId,
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::{services, Result};
|
||||
|
@ -83,6 +86,20 @@ pub struct RegistrationInfo {
|
|||
pub rooms: NamespaceRegex,
|
||||
}
|
||||
|
||||
impl RegistrationInfo {
|
||||
/// Checks if a given user ID matches either the users namespace or the localpart specified in the appservice registration
|
||||
pub fn is_user_match(&self, user_id: &UserId) -> bool {
|
||||
self.users.is_match(user_id.as_str())
|
||||
|| self.registration.sender_localpart == user_id.localpart()
|
||||
}
|
||||
|
||||
/// Checks if a given user ID exclusively matches either the users namespace or the localpart specified in the appservice registration
|
||||
pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool {
|
||||
self.users.is_exclusive_match(user_id.as_str())
|
||||
|| self.registration.sender_localpart == user_id.localpart()
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Registration> for RegistrationInfo {
|
||||
fn try_from(value: Registration) -> Result<RegistrationInfo, regex::Error> {
|
||||
Ok(RegistrationInfo {
|
||||
|
@ -122,6 +139,7 @@ impl Service {
|
|||
}
|
||||
/// Registers an appservice and returns the ID to the caller.
|
||||
pub async fn register_appservice(&self, yaml: Registration) -> Result<String> {
|
||||
//TODO: Check for collisions between exclusive appservice namespaces
|
||||
services()
|
||||
.appservice
|
||||
.registration_info
|
||||
|
@ -175,6 +193,30 @@ impl Service {
|
|||
.cloned()
|
||||
}
|
||||
|
||||
// Checks if a given user id matches any exclusive appservice regex
|
||||
pub async fn is_exclusive_user_id(&self, user_id: &UserId) -> bool {
|
||||
self.read()
|
||||
.await
|
||||
.values()
|
||||
.any(|info| info.is_exclusive_user_match(user_id))
|
||||
}
|
||||
|
||||
// Checks if a given room alias matches any exclusive appservice regex
|
||||
pub async fn is_exclusive_alias(&self, alias: &RoomAliasId) -> bool {
|
||||
self.read()
|
||||
.await
|
||||
.values()
|
||||
.any(|info| info.aliases.is_exclusive_match(alias.as_str()))
|
||||
}
|
||||
|
||||
// Checks if a given room id matches any exclusive appservice regex
|
||||
pub async fn is_exclusive_room_id(&self, room_id: &RoomId) -> bool {
|
||||
self.read()
|
||||
.await
|
||||
.values()
|
||||
.any(|info| info.rooms.is_exclusive_match(room_id.as_str()))
|
||||
}
|
||||
|
||||
pub fn read(
|
||||
&self,
|
||||
) -> impl Future<Output = tokio::sync::RwLockReadGuard<'_, BTreeMap<String, RegistrationInfo>>>
|
||||
|
|
|
@ -1,13 +1,71 @@
|
|||
use std::collections::BTreeMap;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use ruma::{
|
||||
api::federation::discovery::{ServerSigningKeys, VerifyKey},
|
||||
signatures::Ed25519KeyPair,
|
||||
DeviceId, OwnedServerSigningKeyId, ServerName, UserId,
|
||||
use std::{
|
||||
collections::BTreeMap,
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use crate::Result;
|
||||
use crate::{services, Result};
|
||||
use async_trait::async_trait;
|
||||
use ruma::{
|
||||
api::federation::discovery::{OldVerifyKey, ServerSigningKeys, VerifyKey},
|
||||
serde::Base64,
|
||||
signatures::Ed25519KeyPair,
|
||||
DeviceId, MilliSecondsSinceUnixEpoch, ServerName, UserId,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
|
||||
/// Similar to ServerSigningKeys, but drops a few unnecessary fields we don't require post-validation
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
pub struct SigningKeys {
|
||||
pub verify_keys: BTreeMap<String, VerifyKey>,
|
||||
pub old_verify_keys: BTreeMap<String, OldVerifyKey>,
|
||||
pub valid_until_ts: MilliSecondsSinceUnixEpoch,
|
||||
}
|
||||
|
||||
impl SigningKeys {
|
||||
/// Creates the SigningKeys struct, using the keys of the current server
|
||||
pub fn load_own_keys() -> Self {
|
||||
let mut keys = Self {
|
||||
verify_keys: BTreeMap::new(),
|
||||
old_verify_keys: BTreeMap::new(),
|
||||
valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
|
||||
SystemTime::now() + Duration::from_secs(7 * 86400),
|
||||
)
|
||||
.expect("Should be valid until year 500,000,000"),
|
||||
};
|
||||
|
||||
keys.verify_keys.insert(
|
||||
format!("ed25519:{}", services().globals.keypair().version()),
|
||||
VerifyKey {
|
||||
key: Base64::new(services().globals.keypair.public_key().to_vec()),
|
||||
},
|
||||
);
|
||||
|
||||
keys
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ServerSigningKeys> for SigningKeys {
|
||||
fn from(value: ServerSigningKeys) -> Self {
|
||||
let ServerSigningKeys {
|
||||
verify_keys,
|
||||
old_verify_keys,
|
||||
valid_until_ts,
|
||||
..
|
||||
} = value;
|
||||
|
||||
Self {
|
||||
verify_keys: verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id.to_string(), key))
|
||||
.collect(),
|
||||
old_verify_keys: old_verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id.to_string(), key))
|
||||
.collect(),
|
||||
valid_until_ts,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait Data: Send + Sync {
|
||||
|
@ -21,17 +79,23 @@ pub trait Data: Send + Sync {
|
|||
fn clear_caches(&self, amount: u32);
|
||||
fn load_keypair(&self) -> Result<Ed25519KeyPair>;
|
||||
fn remove_keypair(&self) -> Result<()>;
|
||||
fn add_signing_key(
|
||||
/// Only extends the cached keys, not moving any verify_keys to old_verify_keys, as if we suddenly
|
||||
/// recieve requests from the origin server, we want to be able to accept requests from them
|
||||
fn add_signing_key_from_trusted_server(
|
||||
&self,
|
||||
origin: &ServerName,
|
||||
new_keys: ServerSigningKeys,
|
||||
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
|
||||
|
||||
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
|
||||
fn signing_keys_for(
|
||||
) -> Result<SigningKeys>;
|
||||
/// Extends cached keys, as well as moving verify_keys that are not present in these new keys to
|
||||
/// old_verify_keys, so that potnetially comprimised keys cannot be used to make requests
|
||||
fn add_signing_key_from_origin(
|
||||
&self,
|
||||
origin: &ServerName,
|
||||
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
|
||||
new_keys: ServerSigningKeys,
|
||||
) -> Result<SigningKeys>;
|
||||
|
||||
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
|
||||
fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>>;
|
||||
fn database_version(&self) -> Result<u64>;
|
||||
fn bump_database_version(&self, new_version: u64) -> Result<()>;
|
||||
}
|
||||
|
|
|
@ -1,24 +1,19 @@
|
|||
mod data;
|
||||
pub use data::Data;
|
||||
pub use data::{Data, SigningKeys};
|
||||
use ruma::{
|
||||
serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName,
|
||||
OwnedServerSigningKeyId, OwnedUserId,
|
||||
serde::Base64, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedEventId, OwnedRoomAliasId,
|
||||
OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId,
|
||||
};
|
||||
|
||||
use crate::api::server_server::FedDest;
|
||||
use crate::api::server_server::DestinationResponse;
|
||||
|
||||
use crate::{services, Config, Error, Result};
|
||||
use futures_util::FutureExt;
|
||||
use hyper::{
|
||||
client::connect::dns::{GaiResolver, Name},
|
||||
service::Service as HyperService,
|
||||
};
|
||||
use reqwest::dns::{Addrs, Resolve, Resolving};
|
||||
use hickory_resolver::TokioAsyncResolver;
|
||||
use hyper_util::client::legacy::connect::dns::{GaiResolver, Name as HyperName};
|
||||
use reqwest::dns::{Addrs, Name, Resolve, Resolving};
|
||||
use ruma::{
|
||||
api::{
|
||||
client::sync::sync_events,
|
||||
federation::discovery::{ServerSigningKeys, VerifyKey},
|
||||
},
|
||||
api::{client::sync::sync_events, federation::discovery::ServerSigningKeys},
|
||||
DeviceId, RoomVersionId, ServerName, UserId,
|
||||
};
|
||||
use std::{
|
||||
|
@ -29,6 +24,7 @@ use std::{
|
|||
iter,
|
||||
net::{IpAddr, SocketAddr},
|
||||
path::PathBuf,
|
||||
str::FromStr,
|
||||
sync::{
|
||||
atomic::{self, AtomicBool},
|
||||
Arc, RwLock as StdRwLock,
|
||||
|
@ -36,12 +32,12 @@ use std::{
|
|||
time::{Duration, Instant},
|
||||
};
|
||||
use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore};
|
||||
use tower_service::Service as TowerService;
|
||||
use tracing::{error, info};
|
||||
use trust_dns_resolver::TokioAsyncResolver;
|
||||
|
||||
use base64::{engine::general_purpose, Engine as _};
|
||||
|
||||
type WellKnownMap = HashMap<OwnedServerName, (FedDest, String)>;
|
||||
type WellKnownMap = HashMap<OwnedServerName, DestinationResponse>;
|
||||
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
|
||||
type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
|
||||
type SyncHandle = (
|
||||
|
@ -55,6 +51,7 @@ pub struct Service {
|
|||
pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
|
||||
pub tls_name_override: Arc<StdRwLock<TlsNameMap>>,
|
||||
pub config: Config,
|
||||
allow_registration: RwLock<bool>,
|
||||
keypair: Arc<ruma::signatures::Ed25519KeyPair>,
|
||||
dns_resolver: TokioAsyncResolver,
|
||||
jwt_decoding_key: Option<jsonwebtoken::DecodingKey>,
|
||||
|
@ -71,6 +68,8 @@ pub struct Service {
|
|||
pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
|
||||
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
|
||||
pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
|
||||
server_user: OwnedUserId,
|
||||
admin_alias: OwnedRoomAliasId,
|
||||
pub stateres_mutex: Arc<Mutex<()>>,
|
||||
pub rotate: RotationHandler,
|
||||
|
||||
|
@ -80,12 +79,12 @@ pub struct Service {
|
|||
/// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like.
|
||||
///
|
||||
/// This is utilized to have sync workers return early and release read locks on the database.
|
||||
pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>);
|
||||
pub struct RotationHandler(broadcast::Sender<()>);
|
||||
|
||||
impl RotationHandler {
|
||||
pub fn new() -> Self {
|
||||
let (s, r) = broadcast::channel(1);
|
||||
Self(s, r)
|
||||
let s = broadcast::channel(1).0;
|
||||
Self(s)
|
||||
}
|
||||
|
||||
pub fn watch(&self) -> impl Future<Output = ()> {
|
||||
|
@ -137,11 +136,19 @@ impl Resolve for Resolver {
|
|||
})
|
||||
.unwrap_or_else(|| {
|
||||
let this = &mut self.inner.clone();
|
||||
Box::pin(HyperService::<Name>::call(this, name).map(|result| {
|
||||
result
|
||||
.map(|addrs| -> Addrs { Box::new(addrs) })
|
||||
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
|
||||
}))
|
||||
Box::pin(
|
||||
TowerService::<HyperName>::call(
|
||||
this,
|
||||
// Beautiful hack, please remove this in the future.
|
||||
HyperName::from_str(name.as_str())
|
||||
.expect("reqwest Name is just wrapper for hyper-util Name"),
|
||||
)
|
||||
.map(|result| {
|
||||
result
|
||||
.map(|addrs| -> Addrs { Box::new(addrs) })
|
||||
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
|
||||
}),
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -178,11 +185,17 @@ impl Service {
|
|||
RoomVersionId::V8,
|
||||
RoomVersionId::V9,
|
||||
RoomVersionId::V10,
|
||||
RoomVersionId::V11,
|
||||
];
|
||||
// Experimental, partially supported room versions
|
||||
let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
|
||||
|
||||
let mut s = Self {
|
||||
allow_registration: RwLock::new(config.allow_registration),
|
||||
admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name))
|
||||
.expect("#admins:server_name is a valid alias name"),
|
||||
server_user: UserId::parse(format!("@conduit:{}", &config.server_name))
|
||||
.expect("@conduit:server_name is valid"),
|
||||
db,
|
||||
config,
|
||||
keypair: Arc::new(keypair),
|
||||
|
@ -276,6 +289,14 @@ impl Service {
|
|||
self.config.server_name.as_ref()
|
||||
}
|
||||
|
||||
pub fn server_user(&self) -> &UserId {
|
||||
self.server_user.as_ref()
|
||||
}
|
||||
|
||||
pub fn admin_alias(&self) -> &RoomAliasId {
|
||||
self.admin_alias.as_ref()
|
||||
}
|
||||
|
||||
pub fn max_request_size(&self) -> u32 {
|
||||
self.config.max_request_size
|
||||
}
|
||||
|
@ -284,8 +305,15 @@ impl Service {
|
|||
self.config.max_fetch_prev_events
|
||||
}
|
||||
|
||||
pub fn allow_registration(&self) -> bool {
|
||||
self.config.allow_registration
|
||||
/// Allows for the temporary (non-persistant) toggling of registration
|
||||
pub async fn set_registration(&self, status: bool) {
|
||||
let mut lock = self.allow_registration.write().await;
|
||||
*lock = status;
|
||||
}
|
||||
|
||||
/// Checks whether user registration is allowed
|
||||
pub async fn allow_registration(&self) -> bool {
|
||||
*self.allow_registration.read().await
|
||||
}
|
||||
|
||||
pub fn allow_encryption(&self) -> bool {
|
||||
|
@ -361,36 +389,89 @@ impl Service {
|
|||
room_versions
|
||||
}
|
||||
|
||||
/// TODO: the key valid until timestamp is only honored in room version > 4
|
||||
/// Remove the outdated keys and insert the new ones.
|
||||
///
|
||||
/// This doesn't actually check that the keys provided are newer than the old set.
|
||||
pub fn add_signing_key(
|
||||
pub fn add_signing_key_from_trusted_server(
|
||||
&self,
|
||||
origin: &ServerName,
|
||||
new_keys: ServerSigningKeys,
|
||||
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
|
||||
self.db.add_signing_key(origin, new_keys)
|
||||
) -> Result<SigningKeys> {
|
||||
self.db
|
||||
.add_signing_key_from_trusted_server(origin, new_keys)
|
||||
}
|
||||
|
||||
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
|
||||
pub fn signing_keys_for(
|
||||
/// Same as from_trusted_server, except it will move active keys not present in `new_keys` to old_signing_keys
|
||||
pub fn add_signing_key_from_origin(
|
||||
&self,
|
||||
origin: &ServerName,
|
||||
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
|
||||
let mut keys = self.db.signing_keys_for(origin)?;
|
||||
if origin == self.server_name() {
|
||||
keys.insert(
|
||||
format!("ed25519:{}", services().globals.keypair().version())
|
||||
.try_into()
|
||||
.expect("found invalid server signing keys in DB"),
|
||||
VerifyKey {
|
||||
key: Base64::new(self.keypair.public_key().to_vec()),
|
||||
},
|
||||
);
|
||||
}
|
||||
new_keys: ServerSigningKeys,
|
||||
) -> Result<SigningKeys> {
|
||||
self.db.add_signing_key_from_origin(origin, new_keys)
|
||||
}
|
||||
|
||||
Ok(keys)
|
||||
/// This returns Ok(None) when there are no keys found for the server.
|
||||
pub fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> {
|
||||
Ok(self.db.signing_keys_for(origin)?.or_else(|| {
|
||||
if origin == self.server_name() {
|
||||
Some(SigningKeys::load_own_keys())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
/// Filters the key map of multiple servers down to keys that should be accepted given the expiry time,
|
||||
/// room version, and timestamp of the paramters
|
||||
pub fn filter_keys_server_map(
|
||||
&self,
|
||||
keys: BTreeMap<String, SigningKeys>,
|
||||
timestamp: MilliSecondsSinceUnixEpoch,
|
||||
room_version_id: &RoomVersionId,
|
||||
) -> BTreeMap<String, BTreeMap<String, Base64>> {
|
||||
keys.into_iter()
|
||||
.filter_map(|(server, keys)| {
|
||||
self.filter_keys_single_server(keys, timestamp, room_version_id)
|
||||
.map(|keys| (server, keys))
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Filters the keys of a single server down to keys that should be accepted given the expiry time,
|
||||
/// room version, and timestamp of the paramters
|
||||
pub fn filter_keys_single_server(
|
||||
&self,
|
||||
keys: SigningKeys,
|
||||
timestamp: MilliSecondsSinceUnixEpoch,
|
||||
room_version_id: &RoomVersionId,
|
||||
) -> Option<BTreeMap<String, Base64>> {
|
||||
if keys.valid_until_ts > timestamp
|
||||
// valid_until_ts MUST be ignored in room versions 1, 2, 3, and 4.
|
||||
// https://spec.matrix.org/v1.10/server-server-api/#get_matrixkeyv2server
|
||||
|| matches!(room_version_id, RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V3)
|
||||
{
|
||||
// Given that either the room version allows stale keys, or the valid_until_ts is
|
||||
// in the future, all verify_keys are valid
|
||||
let mut map: BTreeMap<_, _> = keys
|
||||
.verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id, key.key))
|
||||
.collect();
|
||||
|
||||
map.extend(keys.old_verify_keys.into_iter().filter_map(|(id, key)| {
|
||||
// Even on old room versions, we don't allow old keys if they are expired
|
||||
if key.expired_ts > timestamp {
|
||||
Some((id, key.key))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}));
|
||||
|
||||
Some(map)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn database_version(&self) -> Result<u64> {
|
||||
|
@ -416,8 +497,12 @@ impl Service {
|
|||
r
|
||||
}
|
||||
|
||||
pub fn well_known_client(&self) -> &Option<String> {
|
||||
&self.config.well_known_client
|
||||
pub fn well_known_server(&self) -> OwnedServerName {
|
||||
self.config.well_known_server()
|
||||
}
|
||||
|
||||
pub fn well_known_client(&self) -> String {
|
||||
self.config.well_known_client()
|
||||
}
|
||||
|
||||
pub fn shutdown(&self) {
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
use ruma::http_headers::ContentDisposition;
|
||||
|
||||
use crate::Result;
|
||||
|
||||
pub trait Data: Send + Sync {
|
||||
|
@ -6,7 +8,7 @@ pub trait Data: Send + Sync {
|
|||
mxc: String,
|
||||
width: u32,
|
||||
height: u32,
|
||||
content_disposition: Option<&str>,
|
||||
content_disposition: &ContentDisposition,
|
||||
content_type: Option<&str>,
|
||||
) -> Result<Vec<u8>>;
|
||||
|
||||
|
@ -16,5 +18,5 @@ pub trait Data: Send + Sync {
|
|||
mxc: String,
|
||||
width: u32,
|
||||
height: u32,
|
||||
) -> Result<(Option<String>, Option<String>, Vec<u8>)>;
|
||||
) -> Result<(ContentDisposition, Option<String>, Vec<u8>)>;
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ mod data;
|
|||
use std::io::Cursor;
|
||||
|
||||
pub use data::Data;
|
||||
use ruma::http_headers::{ContentDisposition, ContentDispositionType};
|
||||
|
||||
use crate::{services, Result};
|
||||
use image::imageops::FilterType;
|
||||
|
@ -12,7 +13,7 @@ use tokio::{
|
|||
};
|
||||
|
||||
pub struct FileMeta {
|
||||
pub content_disposition: Option<String>,
|
||||
pub content_disposition: ContentDisposition,
|
||||
pub content_type: Option<String>,
|
||||
pub file: Vec<u8>,
|
||||
}
|
||||
|
@ -26,14 +27,17 @@ impl Service {
|
|||
pub async fn create(
|
||||
&self,
|
||||
mxc: String,
|
||||
content_disposition: Option<&str>,
|
||||
content_disposition: Option<ContentDisposition>,
|
||||
content_type: Option<&str>,
|
||||
file: &[u8],
|
||||
) -> Result<()> {
|
||||
let content_disposition =
|
||||
content_disposition.unwrap_or(ContentDisposition::new(ContentDispositionType::Inline));
|
||||
|
||||
// Width, Height = 0 if it's not a thumbnail
|
||||
let key = self
|
||||
.db
|
||||
.create_file_metadata(mxc, 0, 0, content_disposition, content_type)?;
|
||||
.create_file_metadata(mxc, 0, 0, &content_disposition, content_type)?;
|
||||
|
||||
let path = services().globals.get_media_file(&key);
|
||||
let mut f = File::create(path).await?;
|
||||
|
@ -46,15 +50,18 @@ impl Service {
|
|||
pub async fn upload_thumbnail(
|
||||
&self,
|
||||
mxc: String,
|
||||
content_disposition: Option<&str>,
|
||||
content_type: Option<&str>,
|
||||
width: u32,
|
||||
height: u32,
|
||||
file: &[u8],
|
||||
) -> Result<()> {
|
||||
let key =
|
||||
self.db
|
||||
.create_file_metadata(mxc, width, height, content_disposition, content_type)?;
|
||||
let key = self.db.create_file_metadata(
|
||||
mxc,
|
||||
width,
|
||||
height,
|
||||
&ContentDisposition::new(ContentDispositionType::Inline),
|
||||
content_type,
|
||||
)?;
|
||||
|
||||
let path = services().globals.get_media_file(&key);
|
||||
let mut f = File::create(path).await?;
|
||||
|
@ -166,22 +173,20 @@ impl Service {
|
|||
/ u64::from(original_height)
|
||||
};
|
||||
if use_width {
|
||||
if intermediate <= u64::from(::std::u32::MAX) {
|
||||
if intermediate <= u64::from(u32::MAX) {
|
||||
(width, intermediate as u32)
|
||||
} else {
|
||||
(
|
||||
(u64::from(width) * u64::from(::std::u32::MAX) / intermediate)
|
||||
as u32,
|
||||
::std::u32::MAX,
|
||||
(u64::from(width) * u64::from(u32::MAX) / intermediate) as u32,
|
||||
u32::MAX,
|
||||
)
|
||||
}
|
||||
} else if intermediate <= u64::from(::std::u32::MAX) {
|
||||
} else if intermediate <= u64::from(u32::MAX) {
|
||||
(intermediate as u32, height)
|
||||
} else {
|
||||
(
|
||||
::std::u32::MAX,
|
||||
(u64::from(height) * u64::from(::std::u32::MAX) / intermediate)
|
||||
as u32,
|
||||
u32::MAX,
|
||||
(u64::from(height) * u64::from(u32::MAX) / intermediate) as u32,
|
||||
)
|
||||
}
|
||||
};
|
||||
|
@ -192,7 +197,7 @@ impl Service {
|
|||
let mut thumbnail_bytes = Vec::new();
|
||||
thumbnail.write_to(
|
||||
&mut Cursor::new(&mut thumbnail_bytes),
|
||||
image::ImageOutputFormat::Png,
|
||||
image::ImageFormat::Png,
|
||||
)?;
|
||||
|
||||
// Save thumbnail in database so we don't have to generate it again next time
|
||||
|
@ -200,7 +205,7 @@ impl Service {
|
|||
mxc,
|
||||
width,
|
||||
height,
|
||||
content_disposition.as_deref(),
|
||||
&content_disposition,
|
||||
content_type.as_deref(),
|
||||
)?;
|
||||
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
use crate::Error;
|
||||
use ruma::{
|
||||
api::client::error::ErrorKind,
|
||||
canonical_json::redact_content_in_place,
|
||||
events::{
|
||||
room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent,
|
||||
room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent},
|
||||
space::child::HierarchySpaceChildEvent,
|
||||
AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent,
|
||||
AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, TimelineEventType,
|
||||
},
|
||||
|
@ -24,7 +27,7 @@ pub struct EventHash {
|
|||
pub sha256: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize, Debug)]
|
||||
#[derive(Clone, Deserialize, Debug, Serialize)]
|
||||
pub struct PduEvent {
|
||||
pub event_id: Arc<EventId>,
|
||||
pub room_id: OwnedRoomId,
|
||||
|
@ -49,48 +52,44 @@ pub struct PduEvent {
|
|||
|
||||
impl PduEvent {
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub fn redact(&mut self, reason: &PduEvent) -> crate::Result<()> {
|
||||
pub fn redact(
|
||||
&mut self,
|
||||
room_version_id: RoomVersionId,
|
||||
reason: &PduEvent,
|
||||
) -> crate::Result<()> {
|
||||
self.unsigned = None;
|
||||
|
||||
let allowed: &[&str] = match self.kind {
|
||||
TimelineEventType::RoomMember => &["join_authorised_via_users_server", "membership"],
|
||||
TimelineEventType::RoomCreate => &["creator"],
|
||||
TimelineEventType::RoomJoinRules => &["join_rule"],
|
||||
TimelineEventType::RoomPowerLevels => &[
|
||||
"ban",
|
||||
"events",
|
||||
"events_default",
|
||||
"kick",
|
||||
"redact",
|
||||
"state_default",
|
||||
"users",
|
||||
"users_default",
|
||||
],
|
||||
TimelineEventType::RoomHistoryVisibility => &["history_visibility"],
|
||||
_ => &[],
|
||||
};
|
||||
|
||||
let mut old_content: BTreeMap<String, serde_json::Value> =
|
||||
serde_json::from_str(self.content.get())
|
||||
.map_err(|_| Error::bad_database("PDU in db has invalid content."))?;
|
||||
|
||||
let mut new_content = serde_json::Map::new();
|
||||
|
||||
for key in allowed {
|
||||
if let Some(value) = old_content.remove(*key) {
|
||||
new_content.insert((*key).to_owned(), value);
|
||||
}
|
||||
}
|
||||
let mut content = serde_json::from_str(self.content.get())
|
||||
.map_err(|_| Error::bad_database("PDU in db has invalid content."))?;
|
||||
redact_content_in_place(&mut content, &room_version_id, self.kind.to_string())
|
||||
.map_err(|e| Error::RedactionError(self.sender.server_name().to_owned(), e))?;
|
||||
|
||||
self.unsigned = Some(to_raw_value(&json!({
|
||||
"redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works")
|
||||
})).expect("to string always works"));
|
||||
|
||||
self.content = to_raw_value(&new_content).expect("to string always works");
|
||||
self.content = to_raw_value(&content).expect("to string always works");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn is_redacted(&self) -> bool {
|
||||
#[derive(Deserialize)]
|
||||
struct ExtractRedactedBecause {
|
||||
redacted_because: Option<serde::de::IgnoredAny>,
|
||||
}
|
||||
|
||||
let Some(unsigned) = &self.unsigned else {
|
||||
return false;
|
||||
};
|
||||
|
||||
let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else {
|
||||
return false;
|
||||
};
|
||||
|
||||
unsigned.redacted_because.is_some()
|
||||
}
|
||||
|
||||
pub fn remove_transaction_id(&mut self) -> crate::Result<()> {
|
||||
if let Some(unsigned) = &self.unsigned {
|
||||
let mut unsigned: BTreeMap<String, Box<RawJsonValue>> =
|
||||
|
@ -116,10 +115,43 @@ impl PduEvent {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Copies the `redacts` property of the event to the `content` dict and vice-versa.
|
||||
///
|
||||
/// This follows the specification's
|
||||
/// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property):
|
||||
///
|
||||
/// > For backwards-compatibility with older clients, servers should add a redacts
|
||||
/// > property to the top level of m.room.redaction events in when serving such events
|
||||
/// > over the Client-Server API.
|
||||
/// >
|
||||
/// > For improved compatibility with newer clients, servers should add a redacts property
|
||||
/// > to the content of m.room.redaction events in older room versions when serving
|
||||
/// > such events over the Client-Server API.
|
||||
pub fn copy_redacts(&self) -> (Option<Arc<EventId>>, Box<RawJsonValue>) {
|
||||
if self.kind == TimelineEventType::RoomRedaction {
|
||||
if let Ok(mut content) =
|
||||
serde_json::from_str::<RoomRedactionEventContent>(self.content.get())
|
||||
{
|
||||
if let Some(redacts) = content.redacts {
|
||||
return (Some(redacts.into()), self.content.clone());
|
||||
} else if let Some(redacts) = self.redacts.clone() {
|
||||
content.redacts = Some(redacts.into());
|
||||
return (
|
||||
self.redacts.clone(),
|
||||
to_raw_value(&content).expect("Must be valid, we only added redacts field"),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
(self.redacts.clone(), self.content.clone())
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub fn to_sync_room_event(&self) -> Raw<AnySyncTimelineEvent> {
|
||||
let (redacts, content) = self.copy_redacts();
|
||||
let mut json = json!({
|
||||
"content": self.content,
|
||||
"content": content,
|
||||
"type": self.kind,
|
||||
"event_id": self.event_id,
|
||||
"sender": self.sender,
|
||||
|
@ -132,7 +164,7 @@ impl PduEvent {
|
|||
if let Some(state_key) = &self.state_key {
|
||||
json["state_key"] = json!(state_key);
|
||||
}
|
||||
if let Some(redacts) = &self.redacts {
|
||||
if let Some(redacts) = &redacts {
|
||||
json["redacts"] = json!(redacts);
|
||||
}
|
||||
|
||||
|
@ -166,8 +198,9 @@ impl PduEvent {
|
|||
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub fn to_room_event(&self) -> Raw<AnyTimelineEvent> {
|
||||
let (redacts, content) = self.copy_redacts();
|
||||
let mut json = json!({
|
||||
"content": self.content,
|
||||
"content": content,
|
||||
"type": self.kind,
|
||||
"event_id": self.event_id,
|
||||
"sender": self.sender,
|
||||
|
@ -181,7 +214,7 @@ impl PduEvent {
|
|||
if let Some(state_key) = &self.state_key {
|
||||
json["state_key"] = json!(state_key);
|
||||
}
|
||||
if let Some(redacts) = &self.redacts {
|
||||
if let Some(redacts) = &redacts {
|
||||
json["redacts"] = json!(redacts);
|
||||
}
|
||||
|
||||
|
@ -190,8 +223,9 @@ impl PduEvent {
|
|||
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub fn to_message_like_event(&self) -> Raw<AnyMessageLikeEvent> {
|
||||
let (redacts, content) = self.copy_redacts();
|
||||
let mut json = json!({
|
||||
"content": self.content,
|
||||
"content": content,
|
||||
"type": self.kind,
|
||||
"event_id": self.event_id,
|
||||
"sender": self.sender,
|
||||
|
@ -205,7 +239,7 @@ impl PduEvent {
|
|||
if let Some(state_key) = &self.state_key {
|
||||
json["state_key"] = json!(state_key);
|
||||
}
|
||||
if let Some(redacts) = &self.redacts {
|
||||
if let Some(redacts) = &redacts {
|
||||
json["redacts"] = json!(redacts);
|
||||
}
|
||||
|
||||
|
@ -410,7 +444,7 @@ pub(crate) fn gen_event_id_canonical_json(
|
|||
"${}",
|
||||
// Anything higher than version3 behaves the same
|
||||
ruma::signatures::reference_hash(&value, room_version_id)
|
||||
.expect("ruma can calculate reference hashes")
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))?
|
||||
)
|
||||
.try_into()
|
||||
.expect("ruma's reference hashes are valid event ids");
|
||||
|
@ -427,4 +461,8 @@ pub struct PduBuilder {
|
|||
pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
|
||||
pub state_key: Option<String>,
|
||||
pub redacts: Option<Arc<EventId>>,
|
||||
/// For timestamped messaging, should only be used for appservices
|
||||
///
|
||||
/// Will be set to current time if None
|
||||
pub timestamp: Option<MilliSecondsSinceUnixEpoch>,
|
||||
}
|
||||
|
|
|
@ -44,13 +44,13 @@ impl Service {
|
|||
}
|
||||
|
||||
#[tracing::instrument(skip(self, destination, request))]
|
||||
pub async fn send_request<T: OutgoingRequest>(
|
||||
pub async fn send_request<T>(
|
||||
&self,
|
||||
destination: &str,
|
||||
request: T,
|
||||
) -> Result<T::IncomingResponse>
|
||||
where
|
||||
T: Debug,
|
||||
T: OutgoingRequest + Debug,
|
||||
{
|
||||
let destination = destination.replace("/_matrix/push/v1/notify", "");
|
||||
|
||||
|
@ -231,11 +231,11 @@ impl Service {
|
|||
|
||||
let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone());
|
||||
device.data.default_payload = http.default_payload.clone();
|
||||
device.data.format = http.format.clone();
|
||||
device.data.format.clone_from(&http.format);
|
||||
|
||||
// Tweaks are only added if the format is NOT event_id_only
|
||||
if !event_id_only {
|
||||
device.tweaks = tweaks.clone();
|
||||
device.tweaks.clone_from(&tweaks);
|
||||
}
|
||||
|
||||
let d = vec![device];
|
||||
|
|
|
@ -1,9 +1,12 @@
|
|||
use crate::Result;
|
||||
use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
|
||||
use ruma::{OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId};
|
||||
|
||||
pub trait Data: Send + Sync {
|
||||
/// Creates or updates the alias to the given room id.
|
||||
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>;
|
||||
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()>;
|
||||
|
||||
/// Finds the user who assigned the given alias to a room
|
||||
fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>>;
|
||||
|
||||
/// Forgets about an alias. Returns an error if the alias did not exist.
|
||||
fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>;
|
||||
|
|
|
@ -1,9 +1,17 @@
|
|||
mod data;
|
||||
|
||||
pub use data::Data;
|
||||
use tracing::error;
|
||||
|
||||
use crate::Result;
|
||||
use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
|
||||
use crate::{services, Error, Result};
|
||||
use ruma::{
|
||||
api::client::error::ErrorKind,
|
||||
events::{
|
||||
room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
|
||||
StateEventType,
|
||||
},
|
||||
OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId,
|
||||
};
|
||||
|
||||
pub struct Service {
|
||||
pub db: &'static dyn Data,
|
||||
|
@ -11,13 +19,71 @@ pub struct Service {
|
|||
|
||||
impl Service {
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
|
||||
self.db.set_alias(alias, room_id)
|
||||
pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> {
|
||||
if alias == services().globals.admin_alias() && user_id != services().globals.server_user()
|
||||
{
|
||||
Err(Error::BadRequest(
|
||||
ErrorKind::forbidden(),
|
||||
"Only the server user can set this alias",
|
||||
))
|
||||
} else {
|
||||
self.db.set_alias(alias, room_id, user_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
|
||||
self.db.remove_alias(alias)
|
||||
fn user_can_remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<bool> {
|
||||
let Some(room_id) = self.resolve_local_alias(alias)? else {
|
||||
return Err(Error::BadRequest(ErrorKind::NotFound, "Alias not found."));
|
||||
};
|
||||
|
||||
// The creator of an alias can remove it
|
||||
if self
|
||||
.db
|
||||
.who_created_alias(alias)?
|
||||
.map(|user| user == user_id)
|
||||
.unwrap_or_default()
|
||||
// Server admins can remove any local alias
|
||||
|| services().admin.user_is_admin(user_id)?
|
||||
// Always allow the Conduit user to remove the alias, since there may not be an admin room
|
||||
|| services().globals.server_user ()== user_id
|
||||
{
|
||||
Ok(true)
|
||||
// Checking whether the user is able to change canonical aliases of the room
|
||||
} else if let Some(event) = services().rooms.state_accessor.room_state_get(
|
||||
&room_id,
|
||||
&StateEventType::RoomPowerLevels,
|
||||
"",
|
||||
)? {
|
||||
serde_json::from_str(event.content.get())
|
||||
.map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels"))
|
||||
.map(|content: RoomPowerLevelsEventContent| {
|
||||
RoomPowerLevels::from(content)
|
||||
.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)
|
||||
})
|
||||
// If there is no power levels event, only the room creator can change canonical aliases
|
||||
} else if let Some(event) = services().rooms.state_accessor.room_state_get(
|
||||
&room_id,
|
||||
&StateEventType::RoomCreate,
|
||||
"",
|
||||
)? {
|
||||
Ok(event.sender == user_id)
|
||||
} else {
|
||||
error!("Room {} has no m.room.create event (VERY BAD)!", room_id);
|
||||
Err(Error::bad_database("Room has no m.room.create event"))
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<()> {
|
||||
if self.user_can_remove_alias(alias, user_id)? {
|
||||
self.db.remove_alias(alias)
|
||||
} else {
|
||||
Err(Error::BadRequest(
|
||||
ErrorKind::forbidden(),
|
||||
"User is not permitted to remove this alias.",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
|
|
|
@ -133,7 +133,10 @@ impl Service {
|
|||
match services().rooms.timeline.get_pdu(&event_id) {
|
||||
Ok(Some(pdu)) => {
|
||||
if pdu.room_id != room_id {
|
||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db"));
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::forbidden(),
|
||||
"Evil event in db",
|
||||
));
|
||||
}
|
||||
for auth_event in &pdu.auth_events {
|
||||
let sauthevent = services()
|
||||
|
|
|
@ -9,6 +9,7 @@ use std::{
|
|||
};
|
||||
|
||||
use futures_util::{stream::FuturesUnordered, Future, StreamExt};
|
||||
use globals::SigningKeys;
|
||||
use ruma::{
|
||||
api::{
|
||||
client::error::ErrorKind,
|
||||
|
@ -23,11 +24,13 @@ use ruma::{
|
|||
},
|
||||
},
|
||||
events::{
|
||||
room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent},
|
||||
StateEventType,
|
||||
room::{
|
||||
create::RoomCreateEventContent, redaction::RoomRedactionEventContent,
|
||||
server_acl::RoomServerAclEventContent,
|
||||
},
|
||||
StateEventType, TimelineEventType,
|
||||
},
|
||||
int,
|
||||
serde::Base64,
|
||||
state_res::{self, RoomVersion, StateMap},
|
||||
uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
|
||||
OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName,
|
||||
|
@ -75,7 +78,7 @@ impl Service {
|
|||
room_id: &'a RoomId,
|
||||
value: BTreeMap<String, CanonicalJsonValue>,
|
||||
is_timeline_event: bool,
|
||||
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
|
||||
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
|
||||
) -> Result<Option<Vec<u8>>> {
|
||||
// 0. Check the server is in the room
|
||||
if !services().rooms.metadata.exists(room_id)? {
|
||||
|
@ -87,7 +90,7 @@ impl Service {
|
|||
|
||||
if services().rooms.metadata.is_disabled(room_id)? {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Federation of this room is currently disabled on this server.",
|
||||
));
|
||||
}
|
||||
|
@ -159,7 +162,7 @@ impl Service {
|
|||
// Check for disabled again because it might have changed
|
||||
if services().rooms.metadata.is_disabled(room_id)? {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Federation of this room is currently disabled on this server.",
|
||||
));
|
||||
}
|
||||
|
@ -301,19 +304,12 @@ impl Service {
|
|||
room_id: &'a RoomId,
|
||||
mut value: BTreeMap<String, CanonicalJsonValue>,
|
||||
auth_events_known: bool,
|
||||
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
|
||||
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
|
||||
) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> {
|
||||
Box::pin(async move {
|
||||
// 1.1. Remove unsigned field
|
||||
value.remove("unsigned");
|
||||
|
||||
// TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
|
||||
|
||||
// We go through all the signatures we see on the value and fetch the corresponding signing
|
||||
// keys
|
||||
self.fetch_required_signing_keys(&value, pub_key_map)
|
||||
.await?;
|
||||
|
||||
// 2. Check signatures, otherwise drop
|
||||
// 3. check content hash, redact if doesn't match
|
||||
let create_event_content: RoomCreateEventContent =
|
||||
|
@ -326,41 +322,80 @@ impl Service {
|
|||
let room_version =
|
||||
RoomVersion::new(room_version_id).expect("room version is supported");
|
||||
|
||||
let guard = pub_key_map.read().await;
|
||||
let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) {
|
||||
Err(e) => {
|
||||
// Drop
|
||||
warn!("Dropping bad event {}: {}", event_id, e,);
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"Signature verification failed",
|
||||
));
|
||||
}
|
||||
Ok(ruma::signatures::Verified::Signatures) => {
|
||||
// Redact
|
||||
warn!("Calculated hash does not match: {}", event_id);
|
||||
let obj = match ruma::canonical_json::redact(value, room_version_id, None) {
|
||||
Ok(obj) => obj,
|
||||
Err(_) => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"Redaction failed",
|
||||
))
|
||||
}
|
||||
};
|
||||
// TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
|
||||
|
||||
// Skip the PDU if it is redacted and we already have it as an outlier event
|
||||
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
|
||||
// We go through all the signatures we see on the value and fetch the corresponding signing
|
||||
// keys
|
||||
self.fetch_required_signing_keys(&value, pub_key_map)
|
||||
.await?;
|
||||
|
||||
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
|
||||
error!("Invalid PDU, no origin_server_ts field");
|
||||
Error::BadRequest(
|
||||
ErrorKind::MissingParam,
|
||||
"Invalid PDU, no origin_server_ts field",
|
||||
)
|
||||
})?;
|
||||
|
||||
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
|
||||
let ts = origin_server_ts.as_integer().ok_or_else(|| {
|
||||
Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"origin_server_ts must be an integer",
|
||||
)
|
||||
})?;
|
||||
|
||||
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
|
||||
})?)
|
||||
};
|
||||
|
||||
let guard = pub_key_map.read().await;
|
||||
|
||||
let pkey_map = (*guard).clone();
|
||||
|
||||
// Removing all the expired keys, unless the room version allows stale keys
|
||||
let filtered_keys = services().globals.filter_keys_server_map(
|
||||
pkey_map,
|
||||
origin_server_ts,
|
||||
room_version_id,
|
||||
);
|
||||
|
||||
let mut val =
|
||||
match ruma::signatures::verify_event(&filtered_keys, &value, room_version_id) {
|
||||
Err(e) => {
|
||||
// Drop
|
||||
warn!("Dropping bad event {}: {}", event_id, e,);
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"Event was redacted and we already knew about it",
|
||||
"Signature verification failed",
|
||||
));
|
||||
}
|
||||
Ok(ruma::signatures::Verified::Signatures) => {
|
||||
// Redact
|
||||
warn!("Calculated hash does not match: {}", event_id);
|
||||
let obj = match ruma::canonical_json::redact(value, room_version_id, None) {
|
||||
Ok(obj) => obj,
|
||||
Err(_) => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"Redaction failed",
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
obj
|
||||
}
|
||||
Ok(ruma::signatures::Verified::All) => value,
|
||||
};
|
||||
// Skip the PDU if it is redacted and we already have it as an outlier event
|
||||
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"Event was redacted and we already knew about it",
|
||||
));
|
||||
}
|
||||
|
||||
obj
|
||||
}
|
||||
Ok(ruma::signatures::Verified::All) => value,
|
||||
};
|
||||
|
||||
drop(guard);
|
||||
|
||||
|
@ -484,7 +519,7 @@ impl Service {
|
|||
create_event: &PduEvent,
|
||||
origin: &ServerName,
|
||||
room_id: &RoomId,
|
||||
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
|
||||
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
|
||||
) -> Result<Option<Vec<u8>>> {
|
||||
// Skip the PDU if we already have it as a timeline event
|
||||
if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) {
|
||||
|
@ -796,7 +831,51 @@ impl Service {
                None::<PduEvent>,
                |k, s| auth_events.get(&(k.clone(), s.to_owned())),
            )
            .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?;
            .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?
            || incoming_pdu.kind == TimelineEventType::RoomRedaction
                && match room_version_id {
                    RoomVersionId::V1
                    | RoomVersionId::V2
                    | RoomVersionId::V3
                    | RoomVersionId::V4
                    | RoomVersionId::V5
                    | RoomVersionId::V6
                    | RoomVersionId::V7
                    | RoomVersionId::V8
                    | RoomVersionId::V9
                    | RoomVersionId::V10 => {
                        if let Some(redact_id) = &incoming_pdu.redacts {
                            !services().rooms.state_accessor.user_can_redact(
                                redact_id,
                                &incoming_pdu.sender,
                                &incoming_pdu.room_id,
                                true,
                            )?
                        } else {
                            false
                        }
                    }
                    RoomVersionId::V11 => {
                        let content = serde_json::from_str::<RoomRedactionEventContent>(
                            incoming_pdu.content.get(),
                        )
                        .map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?;

                        if let Some(redact_id) = &content.redacts {
                            !services().rooms.state_accessor.user_can_redact(
                                redact_id,
                                &incoming_pdu.sender,
                                &incoming_pdu.room_id,
                                true,
                            )?
                        } else {
                            false
                        }
                    }
                    _ => {
                        unreachable!("Validity of room version already checked")
                    }
                };
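Read as a whole, the boolean built in this hunk (its binding sits just above the hunk) now flags the PDU not only when the auth check fails but also when it is a redaction the sender may not perform under the room version's rules. Roughly condensed, with purely illustrative names:

// Illustrative condensation only; all names here are hypothetical stand-ins for the
// expressions shown in the hunk above.
let soft_fail = !auth_check_passed
    || (incoming_pdu.kind == TimelineEventType::RoomRedaction
        && !sender_may_redact(&incoming_pdu, room_version_id)?);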

        // 13. Use state resolution to find new room state
@ -1050,7 +1129,7 @@ impl Service {
        create_event: &'a PduEvent,
        room_id: &'a RoomId,
        room_version_id: &'a RoomVersionId,
        pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
        pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
    ) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
    {
        Box::pin(async move {

@ -1233,7 +1312,7 @@ impl Service {
        create_event: &PduEvent,
        room_id: &RoomId,
        room_version_id: &RoomVersionId,
        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
        pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
        initial_set: Vec<Arc<EventId>>,
    ) -> Result<(
        Vec<Arc<EventId>>,

@ -1331,7 +1410,7 @@ impl Service {
    pub(crate) async fn fetch_required_signing_keys(
        &self,
        event: &BTreeMap<String, CanonicalJsonValue>,
        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
        pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
    ) -> Result<()> {
        let signatures = event
            .get("signatures")

@ -1360,6 +1439,7 @@ impl Service {
                )
            })?,
            signature_ids,
            true,
        )
        .await;
@ -1387,7 +1467,7 @@ impl Service {
|
|||
pdu: &RawJsonValue,
|
||||
servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
|
||||
room_version: &RoomVersionId,
|
||||
pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, Base64>>>,
|
||||
pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, SigningKeys>>,
|
||||
) -> Result<()> {
|
||||
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
|
||||
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
|
||||
|
@ -1397,7 +1477,7 @@ impl Service {
|
|||
let event_id = format!(
|
||||
"${}",
|
||||
ruma::signatures::reference_hash(&value, room_version)
|
||||
.expect("ruma can calculate reference hashes")
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))?
|
||||
);
|
||||
let event_id = <&EventId>::try_from(event_id.as_str())
|
||||
.expect("ruma's reference hashes are valid event ids");
|
||||
|
@ -1438,8 +1518,18 @@ impl Service {
|
|||
|
||||
let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();
|
||||
|
||||
let contains_all_ids = |keys: &BTreeMap<String, Base64>| {
|
||||
signature_ids.iter().all(|id| keys.contains_key(id))
|
||||
let contains_all_ids = |keys: &SigningKeys| {
|
||||
signature_ids.iter().all(|id| {
|
||||
keys.verify_keys
|
||||
.keys()
|
||||
.map(ToString::to_string)
|
||||
.any(|key_id| id == &key_id)
|
||||
|| keys
|
||||
.old_verify_keys
|
||||
.keys()
|
||||
.map(ToString::to_string)
|
||||
.any(|key_id| id == &key_id)
|
||||
})
|
||||
};
|
||||
|
||||
let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| {
|
||||
|
@ -1452,19 +1542,14 @@ impl Service {
|
|||
|
||||
trace!("Loading signing keys for {}", origin);
|
||||
|
||||
let result: BTreeMap<_, _> = services()
|
||||
.globals
|
||||
.signing_keys_for(origin)?
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.key))
|
||||
.collect();
|
||||
if let Some(result) = services().globals.signing_keys_for(origin)? {
|
||||
if !contains_all_ids(&result) {
|
||||
trace!("Signing key not loaded for {}", origin);
|
||||
servers.insert(origin.to_owned(), BTreeMap::new());
|
||||
}
|
||||
|
||||
if !contains_all_ids(&result) {
|
||||
trace!("Signing key not loaded for {}", origin);
|
||||
servers.insert(origin.to_owned(), BTreeMap::new());
|
||||
pub_key_map.insert(origin.to_string(), result);
|
||||
}
|
||||
|
||||
pub_key_map.insert(origin.to_string(), result);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@ -1474,7 +1559,7 @@ impl Service {
|
|||
&self,
|
||||
event: &create_join_event::v2::Response,
|
||||
room_version: &RoomVersionId,
|
||||
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
|
||||
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
|
||||
) -> Result<()> {
|
||||
let mut servers: BTreeMap<
|
||||
OwnedServerName,
|
||||
|
@ -1537,10 +1622,7 @@ impl Service {
|
|||
|
||||
let result = services()
|
||||
.globals
|
||||
.add_signing_key(&k.server_name, k.clone())?
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.key))
|
||||
.collect::<BTreeMap<_, _>>();
|
||||
.add_signing_key_from_trusted_server(&k.server_name, k.clone())?;
|
||||
|
||||
pkm.insert(k.server_name.to_string(), result);
|
||||
}
|
||||
|
@ -1571,12 +1653,9 @@ impl Service {
|
|||
if let (Ok(get_keys_response), origin) = result {
|
||||
info!("Result is from {origin}");
|
||||
if let Ok(key) = get_keys_response.server_key.deserialize() {
|
||||
let result: BTreeMap<_, _> = services()
|
||||
let result = services()
|
||||
.globals
|
||||
.add_signing_key(&origin, key)?
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.key))
|
||||
.collect();
|
||||
.add_signing_key_from_origin(&origin, key)?;
|
||||
pub_key_map.write().await.insert(origin.to_string(), result);
|
||||
}
|
||||
}
|
||||
|
@ -1608,11 +1687,6 @@ impl Service {
|
|||
}
|
||||
};
|
||||
|
||||
if acl_event_content.allow.is_empty() {
|
||||
// Ignore broken acl events
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if acl_event_content.is_allowed(server_name) {
|
||||
Ok(())
|
||||
} else {
|
||||
|
@ -1621,7 +1695,7 @@ impl Service {
|
|||
server_name, room_id
|
||||
);
|
||||
Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Server was denied by room ACL",
|
||||
))
|
||||
}
|
||||
|
@ -1634,9 +1708,23 @@ impl Service {
|
|||
&self,
|
||||
origin: &ServerName,
|
||||
signature_ids: Vec<String>,
|
||||
) -> Result<BTreeMap<String, Base64>> {
|
||||
let contains_all_ids =
|
||||
|keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
|
||||
// Whether to ask for keys from trusted servers. Should be false when getting
|
||||
// keys for validating requests, as per MSC4029
|
||||
query_via_trusted_servers: bool,
|
||||
) -> Result<SigningKeys> {
|
||||
let contains_all_ids = |keys: &SigningKeys| {
|
||||
signature_ids.iter().all(|id| {
|
||||
keys.verify_keys
|
||||
.keys()
|
||||
.map(ToString::to_string)
|
||||
.any(|key_id| id == &key_id)
|
||||
|| keys
|
||||
.old_verify_keys
|
||||
.keys()
|
||||
.map(ToString::to_string)
|
||||
.any(|key_id| id == &key_id)
|
||||
})
|
||||
};
|
||||
|
||||
let permit = services()
|
||||
.globals
|
||||
|
@ -1697,94 +1785,172 @@ impl Service {
|
|||
|
||||
trace!("Loading signing keys for {}", origin);
|
||||
|
||||
let mut result: BTreeMap<_, _> = services()
|
||||
.globals
|
||||
.signing_keys_for(origin)?
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.key))
|
||||
.collect();
|
||||
let result = services().globals.signing_keys_for(origin)?;
|
||||
|
||||
if contains_all_ids(&result) {
|
||||
return Ok(result);
|
||||
let mut expires_soon_or_has_expired = false;
|
||||
|
||||
if let Some(result) = result.clone() {
|
||||
let ts_threshold = MilliSecondsSinceUnixEpoch::from_system_time(
|
||||
SystemTime::now() + Duration::from_secs(30 * 60),
|
||||
)
|
||||
.expect("Should be valid until year 500,000,000");
|
||||
|
||||
debug!(
|
||||
"The treshhold is {:?}, found time is {:?} for server {}",
|
||||
ts_threshold, result.valid_until_ts, origin
|
||||
);
|
||||
|
||||
if contains_all_ids(&result) {
|
||||
// We want to ensure that the keys remain valid by the time the other functions that handle signatures reach them
|
||||
if result.valid_until_ts > ts_threshold {
|
||||
debug!(
|
||||
"Keys for {} are deemed as valid, as they expire at {:?}",
|
||||
&origin, &result.valid_until_ts
|
||||
);
|
||||
return Ok(result);
|
||||
}
|
||||
|
||||
expires_soon_or_has_expired = true;
|
||||
}
|
||||
}
|
||||
|
||||
let mut keys = result.unwrap_or_else(|| SigningKeys {
|
||||
verify_keys: BTreeMap::new(),
|
||||
old_verify_keys: BTreeMap::new(),
|
||||
valid_until_ts: MilliSecondsSinceUnixEpoch::now(),
|
||||
});
|
||||
|
||||
// We want to set this to the max, and then lower it whenever we see older keys
|
||||
keys.valid_until_ts = MilliSecondsSinceUnixEpoch::from_system_time(
|
||||
SystemTime::now() + Duration::from_secs(7 * 86400),
|
||||
)
|
||||
.expect("Should be valid until year 500,000,000");
|
||||
|
||||
debug!("Fetching signing keys for {} over federation", origin);
|
||||
|
||||
if let Some(server_key) = services()
|
||||
if let Some(mut server_key) = services()
|
||||
.sending
|
||||
.send_federation_request(origin, get_server_keys::v2::Request::new())
|
||||
.await
|
||||
.ok()
|
||||
.and_then(|resp| resp.server_key.deserialize().ok())
|
||||
{
|
||||
// Keys should only be valid for a maximum of seven days
|
||||
server_key.valid_until_ts = server_key.valid_until_ts.min(
|
||||
MilliSecondsSinceUnixEpoch::from_system_time(
|
||||
SystemTime::now() + Duration::from_secs(7 * 86400),
|
||||
)
|
||||
.expect("Should be valid until year 500,000,000"),
|
||||
);
|
||||
|
||||
services()
|
||||
.globals
|
||||
.add_signing_key(origin, server_key.clone())?;
|
||||
.add_signing_key_from_origin(origin, server_key.clone())?;
|
||||
|
||||
result.extend(
|
||||
if keys.valid_until_ts > server_key.valid_until_ts {
|
||||
keys.valid_until_ts = server_key.valid_until_ts;
|
||||
}
|
||||
|
||||
keys.verify_keys.extend(
|
||||
server_key
|
||||
.verify_keys
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.key)),
|
||||
.map(|(id, key)| (id.to_string(), key)),
|
||||
);
|
||||
result.extend(
|
||||
keys.old_verify_keys.extend(
|
||||
server_key
|
||||
.old_verify_keys
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.key)),
|
||||
.map(|(id, key)| (id.to_string(), key)),
|
||||
);
|
||||
|
||||
if contains_all_ids(&result) {
|
||||
return Ok(result);
|
||||
if contains_all_ids(&keys) {
|
||||
return Ok(keys);
|
||||
}
|
||||
}
|
||||
|
||||
for server in services().globals.trusted_servers() {
|
||||
debug!("Asking {} for {}'s signing key", server, origin);
|
||||
if let Some(server_keys) = services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
server,
|
||||
get_remote_server_keys::v2::Request::new(
|
||||
origin.to_owned(),
|
||||
MilliSecondsSinceUnixEpoch::from_system_time(
|
||||
SystemTime::now()
|
||||
.checked_add(Duration::from_secs(3600))
|
||||
.expect("SystemTime to large"),
|
||||
)
|
||||
.expect("time is valid"),
|
||||
),
|
||||
)
|
||||
.await
|
||||
.ok()
|
||||
.map(|resp| {
|
||||
resp.server_keys
|
||||
.into_iter()
|
||||
.filter_map(|e| e.deserialize().ok())
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
{
|
||||
trace!("Got signing keys: {:?}", server_keys);
|
||||
for k in server_keys {
|
||||
services().globals.add_signing_key(origin, k.clone())?;
|
||||
result.extend(
|
||||
k.verify_keys
|
||||
if query_via_trusted_servers {
|
||||
for server in services().globals.trusted_servers() {
|
||||
debug!("Asking {} for {}'s signing key", server, origin);
|
||||
if let Some(server_keys) = services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
server,
|
||||
get_remote_server_keys::v2::Request::new(
|
||||
origin.to_owned(),
|
||||
MilliSecondsSinceUnixEpoch::from_system_time(
|
||||
SystemTime::now()
|
||||
.checked_add(Duration::from_secs(3600))
|
||||
.expect("SystemTime to large"),
|
||||
)
|
||||
.expect("time is valid"),
|
||||
),
|
||||
)
|
||||
.await
|
||||
.ok()
|
||||
.map(|resp| {
|
||||
resp.server_keys
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.key)),
|
||||
);
|
||||
result.extend(
|
||||
k.old_verify_keys
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.key)),
|
||||
);
|
||||
}
|
||||
.filter_map(|e| e.deserialize().ok())
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
{
|
||||
trace!("Got signing keys: {:?}", server_keys);
|
||||
for mut k in server_keys {
|
||||
if k.valid_until_ts
|
||||
// Half an hour should give plenty of time for the server to respond with keys that are still
|
||||
// valid, given we requested keys which are valid at least an hour from now
|
||||
< MilliSecondsSinceUnixEpoch::from_system_time(
|
||||
SystemTime::now() + Duration::from_secs(30 * 60),
|
||||
)
|
||||
.expect("Should be valid until year 500,000,000")
|
||||
{
|
||||
// Keys should only be valid for a maximum of seven days
|
||||
k.valid_until_ts = k.valid_until_ts.min(
|
||||
MilliSecondsSinceUnixEpoch::from_system_time(
|
||||
SystemTime::now() + Duration::from_secs(7 * 86400),
|
||||
)
|
||||
.expect("Should be valid until year 500,000,000"),
|
||||
);
|
||||
|
||||
if contains_all_ids(&result) {
|
||||
return Ok(result);
|
||||
if keys.valid_until_ts > k.valid_until_ts {
|
||||
keys.valid_until_ts = k.valid_until_ts;
|
||||
}
|
||||
|
||||
services()
|
||||
.globals
|
||||
.add_signing_key_from_trusted_server(origin, k.clone())?;
|
||||
keys.verify_keys.extend(
|
||||
k.verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id.to_string(), key)),
|
||||
);
|
||||
keys.old_verify_keys.extend(
|
||||
k.old_verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id.to_string(), key)),
|
||||
);
|
||||
} else {
|
||||
warn!(
|
||||
"Server {} gave us keys older than we requested, valid until: {:?}",
|
||||
origin, k.valid_until_ts
|
||||
);
|
||||
}
|
||||
|
||||
if contains_all_ids(&keys) {
|
||||
return Ok(keys);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We should return these keys if fresher keys were not found
|
||||
if expires_soon_or_has_expired {
|
||||
info!("Returning stale keys for {}", origin);
|
||||
return Ok(keys);
|
||||
}
|
||||
|
||||
drop(permit);
|
||||
|
||||
back_off(signature_ids).await;
|
||||
|
|
|
@ -3,9 +3,9 @@ use std::sync::Arc;
|
|||
|
||||
pub use data::Data;
|
||||
use ruma::{
|
||||
api::client::relations::get_relating_events,
|
||||
api::{client::relations::get_relating_events, Direction},
|
||||
events::{relation::RelationType, TimelineEventType},
|
||||
EventId, RoomId, UserId,
|
||||
EventId, RoomId, UInt, UserId,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
|
||||
|
@ -48,37 +48,57 @@ impl Service {
|
|||
target: &EventId,
|
||||
filter_event_type: Option<TimelineEventType>,
|
||||
filter_rel_type: Option<RelationType>,
|
||||
from: PduCount,
|
||||
to: Option<PduCount>,
|
||||
limit: usize,
|
||||
from: Option<String>,
|
||||
to: Option<String>,
|
||||
limit: Option<UInt>,
|
||||
recurse: bool,
|
||||
dir: &Direction,
|
||||
) -> Result<get_relating_events::v1::Response> {
|
||||
let from = match from {
|
||||
Some(from) => PduCount::try_from_string(&from)?,
|
||||
None => match dir {
|
||||
Direction::Forward => PduCount::min(),
|
||||
Direction::Backward => PduCount::max(),
|
||||
},
|
||||
};
|
||||
|
||||
let to = to.as_ref().and_then(|t| PduCount::try_from_string(t).ok());
|
||||
|
||||
// Use limit or else 10, with maximum 100
|
||||
let limit = limit
|
||||
.and_then(|u| u32::try_from(u).ok())
|
||||
.map_or(10_usize, |u| u as usize)
|
||||
.min(100);
|
||||
|
||||
let next_token;
|
||||
|
||||
//TODO: Fix ruma: match body.dir {
|
||||
match ruma::api::Direction::Backward {
|
||||
ruma::api::Direction::Forward => {
|
||||
let events_after: Vec<_> = services()
|
||||
.rooms
|
||||
.pdu_metadata
|
||||
.relations_until(sender_user, room_id, target, from)? // TODO: should be relations_after
|
||||
.filter(|r| {
|
||||
r.as_ref().map_or(true, |(_, pdu)| {
|
||||
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
|
||||
&& if let Ok(content) =
|
||||
serde_json::from_str::<ExtractRelatesToEventId>(
|
||||
pdu.content.get(),
|
||||
)
|
||||
{
|
||||
filter_rel_type
|
||||
.as_ref()
|
||||
.map_or(true, |r| &content.relates_to.rel_type == r)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
// Spec (v1.10) recommends depth of at least 3
|
||||
let depth: u8 = if recurse { 3 } else { 1 };
|
||||
|
||||
match dir {
|
||||
Direction::Forward => {
|
||||
let relations_until = &services().rooms.pdu_metadata.relations_until(
|
||||
sender_user,
|
||||
room_id,
|
||||
target,
|
||||
from,
|
||||
depth,
|
||||
)?;
|
||||
let events_after: Vec<_> = relations_until // TODO: should be relations_after
|
||||
.iter()
|
||||
.filter(|(_, pdu)| {
|
||||
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
|
||||
&& if let Ok(content) =
|
||||
serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
|
||||
{
|
||||
filter_rel_type
|
||||
.as_ref()
|
||||
.map_or(true, |r| &content.relates_to.rel_type == r)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
.take(limit)
|
||||
.filter_map(|r| r.ok()) // Filter out buggy events
|
||||
.filter(|(_, pdu)| {
|
||||
services()
|
||||
.rooms
|
||||
|
@ -86,7 +106,7 @@ impl Service {
|
|||
.user_can_see_event(sender_user, room_id, &pdu.event_id)
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
||||
.take_while(|(k, _)| Some(k) != to.as_ref()) // Stop at `to`
|
||||
.collect();
|
||||
|
||||
next_token = events_after.last().map(|(count, _)| count).copied();
|
||||
|
@ -101,31 +121,32 @@ impl Service {
|
|||
chunk: events_after,
|
||||
next_batch: next_token.map(|t| t.stringify()),
|
||||
prev_batch: Some(from.stringify()),
|
||||
recursion_depth: if recurse { Some(depth.into()) } else { None },
|
||||
})
|
||||
}
|
||||
ruma::api::Direction::Backward => {
|
||||
let events_before: Vec<_> = services()
|
||||
.rooms
|
||||
.pdu_metadata
|
||||
.relations_until(sender_user, room_id, target, from)?
|
||||
.filter(|r| {
|
||||
r.as_ref().map_or(true, |(_, pdu)| {
|
||||
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
|
||||
&& if let Ok(content) =
|
||||
serde_json::from_str::<ExtractRelatesToEventId>(
|
||||
pdu.content.get(),
|
||||
)
|
||||
{
|
||||
filter_rel_type
|
||||
.as_ref()
|
||||
.map_or(true, |r| &content.relates_to.rel_type == r)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
Direction::Backward => {
|
||||
let relations_until = &services().rooms.pdu_metadata.relations_until(
|
||||
sender_user,
|
||||
room_id,
|
||||
target,
|
||||
from,
|
||||
depth,
|
||||
)?;
|
||||
let events_before: Vec<_> = relations_until
|
||||
.iter()
|
||||
.filter(|(_, pdu)| {
|
||||
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
|
||||
&& if let Ok(content) =
|
||||
serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
|
||||
{
|
||||
filter_rel_type
|
||||
.as_ref()
|
||||
.map_or(true, |r| &content.relates_to.rel_type == r)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
.take(limit)
|
||||
.filter_map(|r| r.ok()) // Filter out buggy events
|
||||
.filter(|(_, pdu)| {
|
||||
services()
|
||||
.rooms
|
||||
|
@ -133,7 +154,7 @@ impl Service {
|
|||
.user_can_see_event(sender_user, room_id, &pdu.event_id)
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
||||
.take_while(|&(k, _)| Some(k) != to.as_ref()) // Stop at `to`
|
||||
.collect();
|
||||
|
||||
next_token = events_before.last().map(|(count, _)| count).copied();
|
||||
|
@ -147,6 +168,7 @@ impl Service {
|
|||
chunk: events_before,
|
||||
next_batch: next_token.map(|t| t.stringify()),
|
||||
prev_batch: Some(from.stringify()),
|
||||
recursion_depth: if recurse { Some(depth.into()) } else { None },
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -158,14 +180,44 @@ impl Service {
        room_id: &'a RoomId,
        target: &'a EventId,
        until: PduCount,
    ) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
        max_depth: u8,
    ) -> Result<Vec<(PduCount, PduEvent)>> {
        let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?;
        let target = match services().rooms.timeline.get_pdu_count(target)? {
            Some(PduCount::Normal(c)) => c,
            // TODO: Support backfilled relations
            _ => 0, // This will result in an empty iterator
        };
        self.db.relations_until(user_id, room_id, target, until)

        self.db
            .relations_until(user_id, room_id, target, until)
            .map(|mut relations| {
                let mut pdus: Vec<_> = (*relations).into_iter().filter_map(Result::ok).collect();
                let mut stack: Vec<_> =
                    pdus.clone().iter().map(|pdu| (pdu.to_owned(), 1)).collect();

                while let Some(stack_pdu) = stack.pop() {
                    let target = match stack_pdu.0 .0 {
                        PduCount::Normal(c) => c,
                        // TODO: Support backfilled relations
                        PduCount::Backfilled(_) => 0, // This will result in an empty iterator
                    };

                    if let Ok(relations) = self.db.relations_until(user_id, room_id, target, until)
                    {
                        for relation in relations.flatten() {
                            if stack_pdu.1 < max_depth {
                                stack.push((relation.clone(), stack_pdu.1 + 1));
                            }

                            pdus.push(relation);
                        }
                    }
                }

                pdus.sort_by(|a, b| a.0.cmp(&b.0));
                pdus
            })
    }
||||
|
||||
#[tracing::instrument(skip(self, room_id, event_ids))]
|
||||
|
|
|
@ -4,6 +4,8 @@ use ruma::RoomId;
|
|||
pub trait Data: Send + Sync {
|
||||
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
|
||||
|
||||
fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn search_pdus<'a>(
|
||||
&'a self,
|
||||
|
|
|
@ -15,6 +15,16 @@ impl Service {
|
|||
self.db.index_pdu(shortroomid, pdu_id, message_body)
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub fn deindex_pdu<'a>(
|
||||
&self,
|
||||
shortroomid: u64,
|
||||
pdu_id: &[u8],
|
||||
message_body: &str,
|
||||
) -> Result<()> {
|
||||
self.db.deindex_pdu(shortroomid, pdu_id, message_body)
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub fn search_pdus<'a>(
|
||||
&'a self,
|
||||
|
|
|
@ -408,7 +408,7 @@ impl Service {
|
|||
debug!("User is not allowed to see room {room_id}");
|
||||
// This error will be caught later
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"User is not allowed to see the room",
|
||||
));
|
||||
}
|
||||
|
@ -482,7 +482,7 @@ impl Service {
|
|||
match join_rule {
|
||||
JoinRule::Restricted(r) => {
|
||||
for rule in &r.allow {
|
||||
if let join_rules::AllowRule::RoomMembership(rm) = rule {
|
||||
if let AllowRule::RoomMembership(rm) = rule {
|
||||
if let Ok(true) = services()
|
||||
.rooms
|
||||
.state_cache
|
||||
|
|
|
@ -86,7 +86,6 @@ impl Service {
|
|||
membership,
|
||||
&pdu.sender,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
)?;
|
||||
}
|
||||
|
|
|
@ -13,12 +13,15 @@ use ruma::{
|
|||
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
||||
member::{MembershipState, RoomMemberEventContent},
|
||||
name::RoomNameEventContent,
|
||||
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
|
||||
},
|
||||
StateEventType,
|
||||
},
|
||||
state_res::Event,
|
||||
EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
|
||||
};
|
||||
use serde_json::value::to_raw_value;
|
||||
use tokio::sync::MutexGuard;
|
||||
use tracing::{error, warn};
|
||||
|
||||
use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result};
|
||||
|
@ -307,6 +310,7 @@ impl Service {
|
|||
room_id: &RoomId,
|
||||
sender: &UserId,
|
||||
target_user: &UserId,
|
||||
state_lock: &MutexGuard<'_, ()>,
|
||||
) -> Result<bool> {
|
||||
let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite))
|
||||
.expect("Event content always serializes");
|
||||
|
@ -317,23 +321,13 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some(target_user.into()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
};
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.await
|
||||
.entry(room_id.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
Ok(services()
|
||||
.rooms
|
||||
.timeline
|
||||
.create_hash_and_sign_event(new_event, sender, room_id, &state_lock)
|
||||
.create_hash_and_sign_event(new_event, sender, room_id, state_lock)
|
||||
.is_ok())
|
||||
}
|
||||
|
||||
|
@ -351,4 +345,55 @@ impl Service {
                .map_err(|_| Error::bad_database("Invalid room member event in database."))
        })
    }

    /// Checks if a given user can redact a given event
    ///
    /// If `federation` is `true`, it allows redaction events from any user of the same server
    /// as the original event sender, [as required by room versions >=
    /// v3](https://spec.matrix.org/v1.10/rooms/v11/#handling-redactions)
    pub fn user_can_redact(
        &self,
        redacts: &EventId,
        sender: &UserId,
        room_id: &RoomId,
        federation: bool,
    ) -> Result<bool> {
        self.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
            .map(|e| {
                serde_json::from_str(e.content.get())
                    .map(|c: RoomPowerLevelsEventContent| c.into())
                    .map(|e: RoomPowerLevels| {
                        e.user_can_redact_event_of_other(sender)
                            || e.user_can_redact_own_event(sender)
                                && if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts)
                                {
                                    if federation {
                                        pdu.sender().server_name() == sender.server_name()
                                    } else {
                                        pdu.sender == sender
                                    }
                                } else {
                                    false
                                }
                    })
                    .map_err(|_| {
                        Error::bad_database("Invalid m.room.power_levels event in database")
                    })
            })
            // Falling back on m.room.create to judge power levels
            .unwrap_or_else(|| {
                if let Some(pdu) = self.room_state_get(room_id, &StateEventType::RoomCreate, "")? {
                    Ok(pdu.sender == sender
                        || if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts) {
                            pdu.sender == sender
                        } else {
                            false
                        })
                } else {
                    Err(Error::bad_database(
                        "No m.room.power_levels or m.room.create events in database for room",
                    ))
                }
            })
    }
}
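Elsewhere in this diff the check is wired in at two kinds of call sites, condensed below: PDUs arriving over federation pass `federation: true` (so any user from the original sender's server may redact), while locally created redactions pass `false`:

// Condensed from the call sites added in this diff (event_handler and timeline services).
let allowed_over_federation = services().rooms.state_accessor.user_can_redact(
    redact_id,
    &incoming_pdu.sender,
    &incoming_pdu.room_id,
    true,
)?;
let allowed_locally = services().rooms.state_accessor.user_can_redact(
    redact_id,
    &pdu.sender,
    &pdu.room_id,
    false,
)?;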
|
||||
|
|
|
@ -15,7 +15,6 @@ pub trait Data: Send + Sync {
|
|||
user_id: &UserId,
|
||||
room_id: &RoomId,
|
||||
last_state: Option<Vec<Raw<AnyStrippedStateEvent>>>,
|
||||
invite_via: Option<Vec<OwnedServerName>>,
|
||||
) -> Result<()>;
|
||||
fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>;
|
||||
|
||||
|
@ -107,14 +106,4 @@ pub trait Data: Send + Sync {
|
|||
fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool>;
|
||||
|
||||
fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool>;
|
||||
|
||||
/// Gets the servers to either accept or decline invites via for a given room.
|
||||
fn servers_invite_via(&self, room_id: &RoomId) -> Result<Option<Vec<OwnedServerName>>>;
|
||||
|
||||
/// Add the given servers the list to accept or decline invites via for a given room.
|
||||
fn add_servers_invite_via(
|
||||
&self,
|
||||
room_id: &RoomId,
|
||||
servers: &Vec<OwnedServerName>,
|
||||
) -> Result<()>;
|
||||
}
|
||||
|
|
|
@ -3,19 +3,14 @@ use std::{collections::HashSet, sync::Arc};
|
|||
|
||||
pub use data::Data;
|
||||
|
||||
use itertools::Itertools;
|
||||
use ruma::{
|
||||
events::{
|
||||
direct::DirectEvent,
|
||||
ignored_user_list::IgnoredUserListEvent,
|
||||
room::{
|
||||
create::RoomCreateEventContent, member::MembershipState,
|
||||
power_levels::RoomPowerLevelsEventContent,
|
||||
},
|
||||
room::{create::RoomCreateEventContent, member::MembershipState},
|
||||
AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType,
|
||||
RoomAccountDataEventType, StateEventType,
|
||||
},
|
||||
int,
|
||||
serde::Raw,
|
||||
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
|
||||
};
|
||||
|
@ -37,7 +32,6 @@ impl Service {
|
|||
membership: MembershipState,
|
||||
sender: &UserId,
|
||||
last_state: Option<Vec<Raw<AnyStrippedStateEvent>>>,
|
||||
invite_via: Option<Vec<OwnedServerName>>,
|
||||
update_joined_count: bool,
|
||||
) -> Result<()> {
|
||||
// Keep track what remote users exist by adding them as "deactivated" users
|
||||
|
@ -182,8 +176,7 @@ impl Service {
|
|||
return Ok(());
|
||||
}
|
||||
|
||||
self.db
|
||||
.mark_as_invited(user_id, room_id, last_state, invite_via)?;
|
||||
self.db.mark_as_invited(user_id, room_id, last_state)?;
|
||||
}
|
||||
MembershipState::Leave | MembershipState::Ban => {
|
||||
self.db.mark_as_left(user_id, room_id)?;
|
||||
|
@ -357,54 +350,4 @@ impl Service {
    pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
        self.db.is_left(user_id, room_id)
    }

    #[tracing::instrument(skip(self))]
    pub fn servers_invite_via(&self, room_id: &RoomId) -> Result<Option<Vec<OwnedServerName>>> {
        self.db.servers_invite_via(room_id)
    }

    /// Gets up to three servers that are likely to be in the room in the distant future.
    ///
    /// See https://spec.matrix.org/v1.10/appendices/#routing
    #[tracing::instrument(skip(self))]
    pub fn servers_route_via(&self, room_id: &RoomId) -> Result<Vec<OwnedServerName>> {
        let most_powerful_user_server = services()
            .rooms
            .state_accessor
            .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
            .map(|pdu| {
                serde_json::from_str(pdu.content.get()).map(
                    |conent: RoomPowerLevelsEventContent| {
                        conent
                            .users
                            .iter()
                            .max_by_key(|(_, power)| power)
                            .and_then(|x| if x.1 >= &int!(50) { Some(x) } else { None })
                            .map(|(user, power)| user.server_name().to_owned())
                    },
                )
            })
            .transpose()
            .map_err(|e| Error::bad_database("Invalid power levels event content in database"))?
            .flatten();

        let mut servers = services()
            .rooms
            .state_cache
            .room_members(room_id)
            .filter_map(Result::ok)
            .counts_by(|user| user.server_name())
            .iter()
            .sorted_by_key(|(_, users)| users)
            .map(|(server, _)| server.to_owned().to_owned())
            .rev()
            .take(3)
            .collect_vec();

        if let Some(server) = most_powerful_user_server {
            servers.insert(0, server);
            servers.truncate(3);
        }
        Ok(servers)
    }
}
||||
|
|
|
@ -14,16 +14,16 @@ use ruma::{
|
|||
events::{
|
||||
push_rules::PushRulesEvent,
|
||||
room::{
|
||||
create::RoomCreateEventContent, encrypted::Relation, member::MembershipState,
|
||||
power_levels::RoomPowerLevelsEventContent,
|
||||
canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent,
|
||||
encrypted::Relation, member::MembershipState,
|
||||
power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent,
|
||||
},
|
||||
GlobalAccountDataEventType, StateEventType, TimelineEventType,
|
||||
},
|
||||
push::{Action, Ruleset, Tweak},
|
||||
serde::Base64,
|
||||
state_res::{self, Event, RoomVersion},
|
||||
uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
|
||||
OwnedServerName, RoomId, ServerName, UserId,
|
||||
uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
|
||||
OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
|
||||
|
@ -33,7 +33,7 @@ use tracing::{error, info, warn};
|
|||
use crate::{
|
||||
api::server_server,
|
||||
service::{
|
||||
appservice::NamespaceRegex,
|
||||
globals::SigningKeys,
|
||||
pdu::{EventHash, PduBuilder},
|
||||
},
|
||||
services, utils, Error, PduEvent, Result,
|
||||
|
@ -382,9 +382,48 @@ impl Service {
|
|||
|
||||
match pdu.kind {
|
||||
TimelineEventType::RoomRedaction => {
|
||||
if let Some(redact_id) = &pdu.redacts {
|
||||
self.redact_pdu(redact_id, pdu)?;
|
||||
}
|
||||
let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?;
|
||||
match room_version_id {
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
| RoomVersionId::V7
|
||||
| RoomVersionId::V8
|
||||
| RoomVersionId::V9
|
||||
| RoomVersionId::V10 => {
|
||||
if let Some(redact_id) = &pdu.redacts {
|
||||
if services().rooms.state_accessor.user_can_redact(
|
||||
redact_id,
|
||||
&pdu.sender,
|
||||
&pdu.room_id,
|
||||
false,
|
||||
)? {
|
||||
self.redact_pdu(redact_id, pdu, shortroomid)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
RoomVersionId::V11 => {
|
||||
let content =
|
||||
serde_json::from_str::<RoomRedactionEventContent>(pdu.content.get())
|
||||
.map_err(|_| {
|
||||
Error::bad_database("Invalid content in redaction pdu.")
|
||||
})?;
|
||||
if let Some(redact_id) = &content.redacts {
|
||||
if services().rooms.state_accessor.user_can_redact(
|
||||
redact_id,
|
||||
&pdu.sender,
|
||||
&pdu.room_id,
|
||||
false,
|
||||
)? {
|
||||
self.redact_pdu(redact_id, pdu, shortroomid)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => unreachable!("Validity of room version already checked"),
|
||||
};
|
||||
}
|
||||
TimelineEventType::SpaceChild => {
|
||||
if let Some(_state_key) = &pdu.state_key {
|
||||
|
@ -427,7 +466,6 @@ impl Service {
|
|||
content.membership,
|
||||
&pdu.sender,
|
||||
invite_state,
|
||||
None,
|
||||
true,
|
||||
)?;
|
||||
}
|
||||
|
@ -447,20 +485,27 @@ impl Service {
|
|||
.search
|
||||
.index_pdu(shortroomid, &pdu_id, &body)?;
|
||||
|
||||
let server_user = format!("@conduit:{}", services().globals.server_name());
|
||||
let server_user = services().globals.server_user();
|
||||
|
||||
let to_conduit = body.starts_with(&format!("{server_user}: "))
|
||||
|| body.starts_with(&format!("{server_user} "))
|
||||
|| body == format!("{server_user}:")
|
||||
|| body == server_user;
|
||||
|| body == server_user.as_str();
|
||||
|
||||
// This will evaluate to false if the emergency password is set up so that
|
||||
// the administrator can execute commands as conduit
|
||||
let from_conduit = pdu.sender == server_user
|
||||
let from_conduit = pdu.sender == *server_user
|
||||
&& services().globals.emergency_password().is_none();
|
||||
|
||||
if let Some(admin_room) = services().admin.get_admin_room()? {
|
||||
if to_conduit && !from_conduit && admin_room == pdu.room_id {
|
||||
if to_conduit
|
||||
&& !from_conduit
|
||||
&& admin_room == pdu.room_id
|
||||
&& services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(server_user, &admin_room)?
|
||||
{
|
||||
services().admin.process_message(body);
|
||||
}
|
||||
}
|
||||
|
@ -556,26 +601,47 @@ impl Service {
|
|||
}
|
||||
}
|
||||
|
||||
let matching_users = |users: &NamespaceRegex| {
|
||||
appservice.users.is_match(pdu.sender.as_str())
|
||||
let matching_users = || {
|
||||
services().globals.server_name() == pdu.sender.server_name()
|
||||
&& appservice.is_user_match(&pdu.sender)
|
||||
|| pdu.kind == TimelineEventType::RoomMember
|
||||
&& pdu
|
||||
.state_key
|
||||
.as_ref()
|
||||
.map_or(false, |state_key| users.is_match(state_key))
|
||||
&& pdu.state_key.as_ref().map_or(false, |state_key| {
|
||||
UserId::parse(state_key).map_or(false, |user_id| {
|
||||
services().globals.server_name() == user_id.server_name()
|
||||
&& appservice.is_user_match(&user_id)
|
||||
})
|
||||
})
|
||||
};
|
||||
let matching_aliases = |aliases: &NamespaceRegex| {
|
||||
|
||||
let matching_aliases = || {
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.local_aliases_for_room(&pdu.room_id)
|
||||
.filter_map(|r| r.ok())
|
||||
.any(|room_alias| aliases.is_match(room_alias.as_str()))
|
||||
.filter_map(Result::ok)
|
||||
.any(|room_alias| appservice.aliases.is_match(room_alias.as_str()))
|
||||
|| if let Ok(Some(pdu)) = services().rooms.state_accessor.room_state_get(
|
||||
&pdu.room_id,
|
||||
&StateEventType::RoomCanonicalAlias,
|
||||
"",
|
||||
) {
|
||||
serde_json::from_str::<RoomCanonicalAliasEventContent>(pdu.content.get())
|
||||
.map_or(false, |content| {
|
||||
content.alias.map_or(false, |alias| {
|
||||
appservice.aliases.is_match(alias.as_str())
|
||||
}) || content
|
||||
.alt_aliases
|
||||
.iter()
|
||||
.any(|alias| appservice.aliases.is_match(alias.as_str()))
|
||||
})
|
||||
} else {
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if matching_aliases(&appservice.aliases)
|
||||
if matching_aliases()
|
||||
|| appservice.rooms.is_match(pdu.room_id.as_str())
|
||||
|| matching_users(&appservice.users)
|
||||
|| matching_users()
|
||||
{
|
||||
services()
|
||||
.sending
|
||||
|
@ -599,6 +665,7 @@ impl Service {
|
|||
unsigned,
|
||||
state_key,
|
||||
redacts,
|
||||
timestamp,
|
||||
} = pdu_builder;
|
||||
|
||||
let prev_events: Vec<_> = services()
|
||||
|
@ -609,28 +676,24 @@ impl Service {
|
|||
.take(20)
|
||||
.collect();
|
||||
|
||||
let create_event = services().rooms.state_accessor.room_state_get(
|
||||
room_id,
|
||||
&StateEventType::RoomCreate,
|
||||
"",
|
||||
)?;
|
||||
// If there was no create event yet, assume we are creating a room
|
||||
let room_version_id = services()
|
||||
.rooms
|
||||
.state
|
||||
.get_room_version(room_id)
|
||||
.or_else(|_| {
|
||||
if event_type == TimelineEventType::RoomCreate {
|
||||
let content = serde_json::from_str::<RoomCreateEventContent>(content.get())
|
||||
.expect("Invalid content in RoomCreate pdu.");
|
||||
Ok(content.room_version)
|
||||
} else {
|
||||
Err(Error::InconsistentRoomState(
|
||||
"non-create event for room of unknown version",
|
||||
room_id.to_owned(),
|
||||
))
|
||||
}
|
||||
})?;
|
||||
|
||||
let create_event_content: Option<RoomCreateEventContent> = create_event
|
||||
.as_ref()
|
||||
.map(|create_event| {
|
||||
serde_json::from_str(create_event.content.get()).map_err(|e| {
|
||||
warn!("Invalid create event: {}", e);
|
||||
Error::bad_database("Invalid create event in db.")
|
||||
})
|
||||
})
|
||||
.transpose()?;
|
||||
|
||||
// If there was no create event yet, assume we are creating a room with the default
|
||||
// version right now
|
||||
let room_version_id = create_event_content
|
||||
.map_or(services().globals.default_room_version(), |create_event| {
|
||||
create_event.room_version
|
||||
});
|
||||
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
|
||||
|
||||
let auth_events = services().rooms.state.get_auth_events(
|
||||
|
@ -672,9 +735,9 @@ impl Service {
|
|||
event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
|
||||
room_id: room_id.to_owned(),
|
||||
sender: sender.to_owned(),
|
||||
origin_server_ts: utils::millis_since_unix_epoch()
|
||||
.try_into()
|
||||
.expect("time is valid"),
|
||||
origin_server_ts: timestamp
|
||||
.map(|ts| ts.get())
|
||||
.unwrap_or_else(|| MilliSecondsSinceUnixEpoch::now().get()),
|
||||
kind: event_type,
|
||||
content,
|
||||
state_key,
|
||||
|
@ -709,7 +772,7 @@ impl Service {
|
|||
|
||||
if !auth_check {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Event is not authorized.",
|
||||
));
|
||||
}
|
||||
|
@ -752,7 +815,7 @@ impl Service {
|
|||
pdu.event_id = EventId::parse_arc(format!(
|
||||
"${}",
|
||||
ruma::signatures::reference_hash(&pdu_json, &room_version_id)
|
||||
.expect("ruma can calculate reference hashes")
|
||||
.expect("Event format validated when event was hashed")
|
||||
))
|
||||
.expect("ruma's reference hashes are valid event ids");
|
||||
|
||||
|
@ -789,7 +852,7 @@ impl Service {
|
|||
TimelineEventType::RoomEncryption => {
|
||||
warn!("Encryption is not allowed in the admins room");
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Encryption is not allowed in the admins room.",
|
||||
));
|
||||
}
|
||||
|
@ -804,7 +867,7 @@ impl Service {
|
|||
.filter(|v| v.starts_with('@'))
|
||||
.unwrap_or(sender.as_str());
|
||||
let server_name = services().globals.server_name();
|
||||
let server_user = format!("@conduit:{}", server_name);
|
||||
let server_user = services().globals.server_user().as_str();
|
||||
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
|
||||
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
|
||||
|
||||
|
@ -812,7 +875,7 @@ impl Service {
|
|||
if target == server_user {
|
||||
warn!("Conduit user cannot leave from admins room");
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Conduit user cannot leave from admins room.",
|
||||
));
|
||||
}
|
||||
|
@ -828,7 +891,7 @@ impl Service {
|
|||
if count < 2 {
|
||||
warn!("Last admin cannot leave from admins room");
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Last admin cannot leave from admins room.",
|
||||
));
|
||||
}
|
||||
|
@ -838,7 +901,7 @@ impl Service {
|
|||
if target == server_user {
|
||||
warn!("Conduit user cannot be banned in admins room");
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Conduit user cannot be banned in admins room.",
|
||||
));
|
||||
}
|
||||
|
@ -854,7 +917,7 @@ impl Service {
|
|||
if count < 2 {
|
||||
warn!("Last admin cannot be banned in admins room");
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Last admin cannot be banned in admins room.",
|
||||
));
|
||||
}
|
||||
|
@ -865,6 +928,63 @@ impl Service {
|
|||
}
|
||||
}
|
||||
|
||||
// If redaction event is not authorized, do not append it to the timeline
|
||||
if pdu.kind == TimelineEventType::RoomRedaction {
|
||||
match services().rooms.state.get_room_version(&pdu.room_id)? {
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
| RoomVersionId::V7
|
||||
| RoomVersionId::V8
|
||||
| RoomVersionId::V9
|
||||
| RoomVersionId::V10 => {
|
||||
if let Some(redact_id) = &pdu.redacts {
|
||||
if !services().rooms.state_accessor.user_can_redact(
|
||||
redact_id,
|
||||
&pdu.sender,
|
||||
&pdu.room_id,
|
||||
false,
|
||||
)? {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::forbidden(),
|
||||
"User cannot redact this event.",
|
||||
));
|
||||
}
|
||||
};
|
||||
}
|
||||
RoomVersionId::V11 => {
|
||||
let content =
|
||||
serde_json::from_str::<RoomRedactionEventContent>(pdu.content.get())
|
||||
.map_err(|_| {
|
||||
Error::bad_database("Invalid content in redaction pdu.")
|
||||
})?;
|
||||
|
||||
if let Some(redact_id) = &content.redacts {
|
||||
if !services().rooms.state_accessor.user_can_redact(
|
||||
redact_id,
|
||||
&pdu.sender,
|
||||
&pdu.room_id,
|
||||
false,
|
||||
)? {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::forbidden(),
|
||||
"User cannot redact this event.",
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::UnsupportedRoomVersion,
|
||||
"Unsupported room version",
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We append to state before appending the pdu, so we don't have a moment in time with the
|
||||
// pdu without it's state. This is okay because append_pdu can't fail.
|
||||
let statehashid = services().rooms.state.append_to_state(&pdu)?;
|
||||
|
@ -990,13 +1110,33 @@ impl Service {
|
|||
|
||||
/// Replace a PDU with the redacted form.
|
||||
#[tracing::instrument(skip(self, reason))]
|
||||
pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> {
|
||||
pub fn redact_pdu(
|
||||
&self,
|
||||
event_id: &EventId,
|
||||
reason: &PduEvent,
|
||||
shortroomid: u64,
|
||||
) -> Result<()> {
|
||||
// TODO: Don't reserialize, keep original json
|
||||
if let Some(pdu_id) = self.get_pdu_id(event_id)? {
|
||||
let mut pdu = self
|
||||
.get_pdu_from_id(&pdu_id)?
|
||||
.ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?;
|
||||
pdu.redact(reason)?;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct ExtractBody {
|
||||
body: String,
|
||||
}
|
||||
|
||||
if let Ok(content) = serde_json::from_str::<ExtractBody>(pdu.content.get()) {
|
||||
services()
|
||||
.rooms
|
||||
.search
|
||||
.deindex_pdu(shortroomid, &pdu_id, &content.body)?;
|
||||
}
|
||||
|
||||
let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?;
|
||||
pdu.redact(room_version_id, reason)?;
|
||||
|
||||
self.replace_pdu(
|
||||
&pdu_id,
|
||||
&utils::to_canonical_object(&pdu).expect("PDU is an object"),
|
||||
|
@ -1077,7 +1217,7 @@ impl Service {
|
|||
&self,
|
||||
origin: &ServerName,
|
||||
pdu: Box<RawJsonValue>,
|
||||
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
|
||||
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
|
||||
) -> Result<()> {
|
||||
let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?;
|
||||
|
||||
|
|
|
@ -675,13 +675,13 @@ impl Service {
    }

    #[tracing::instrument(skip(self, destination, request))]
    pub async fn send_federation_request<T: OutgoingRequest>(
    pub async fn send_federation_request<T>(
        &self,
        destination: &ServerName,
        request: T,
    ) -> Result<T::IncomingResponse>
    where
        T: Debug,
        T: OutgoingRequest + Debug,
    {
        debug!("Waiting for permit");
        let permit = self.maximum_requests.acquire().await;

@ -704,13 +704,13 @@ impl Service {
    ///
    /// Only returns None if there is no url specified in the appservice registration file
    #[tracing::instrument(skip(self, registration, request))]
    pub async fn send_appservice_request<T: OutgoingRequest>(
    pub async fn send_appservice_request<T>(
        &self,
        registration: Registration,
        request: T,
    ) -> Result<Option<T::IncomingResponse>>
    where
        T: Debug,
        T: OutgoingRequest + Debug,
    {
        let permit = self.maximum_requests.acquire().await;
        let response = appservice_server::send_request(registration, request).await;
@ -86,7 +86,7 @@ impl Service {

        if !hash_matches {
            uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody {
                kind: ErrorKind::Forbidden,
                kind: ErrorKind::forbidden(),
                message: "Invalid username or password.".to_owned(),
            });
            return Ok((false, uiaainfo));

@ -101,7 +101,7 @@ impl Service {
            uiaainfo.completed.push(AuthType::RegistrationToken);
        } else {
            uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody {
                kind: ErrorKind::Forbidden,
                kind: ErrorKind::forbidden(),
                message: "Invalid registration token.".to_owned(),
            });
            return Ok((false, uiaainfo));
@ -211,4 +211,10 @@ pub trait Data: Send + Sync {
    fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result<String>;

    fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result<Option<FilterDefinition>>;

    // Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
    fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)>;

    /// Find out which user an OpenID access token belongs to.
    fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>>;
}
@ -9,7 +9,6 @@ pub use data::Data;
|
|||
use ruma::{
|
||||
api::client::{
|
||||
device::Device,
|
||||
error::ErrorKind,
|
||||
filter::FilterDefinition,
|
||||
sync::sync_events::{
|
||||
self,
|
||||
|
@ -20,7 +19,7 @@ use ruma::{
|
|||
events::AnyToDeviceEvent,
|
||||
serde::Raw,
|
||||
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri,
|
||||
OwnedRoomId, OwnedUserId, RoomAliasId, UInt, UserId,
|
||||
OwnedRoomId, OwnedUserId, UInt, UserId,
|
||||
};
|
||||
|
||||
use crate::{services, Error, Result};
|
||||
|
@ -86,11 +85,12 @@ impl Service {
|
|||
for (list_id, list) in &mut request.lists {
|
||||
if let Some(cached_list) = cached.lists.get(list_id) {
|
||||
if list.sort.is_empty() {
|
||||
list.sort = cached_list.sort.clone();
|
||||
list.sort.clone_from(&cached_list.sort);
|
||||
};
|
||||
if list.room_details.required_state.is_empty() {
|
||||
list.room_details.required_state =
|
||||
cached_list.room_details.required_state.clone();
|
||||
list.room_details
|
||||
.required_state
|
||||
.clone_from(&cached_list.room_details.required_state);
|
||||
};
|
||||
list.room_details.timeline_limit = list
|
||||
.room_details
|
||||
|
@ -132,7 +132,8 @@ impl Service {
|
|||
(_, _) => {}
|
||||
}
|
||||
if list.bump_event_types.is_empty() {
|
||||
list.bump_event_types = cached_list.bump_event_types.clone();
|
||||
list.bump_event_types
|
||||
.clone_from(&cached_list.bump_event_types);
|
||||
};
|
||||
}
|
||||
cached.lists.insert(list_id.clone(), list.clone());
|
||||
|
@ -260,19 +261,14 @@ impl Service {
|
|||
|
||||
/// Check if a user is an admin
|
||||
pub fn is_admin(&self, user_id: &UserId) -> Result<bool> {
|
||||
let admin_room_alias_id =
|
||||
RoomAliasId::parse(format!("#admins:{}", services().globals.server_name()))
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
|
||||
let admin_room_id = services()
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(&admin_room_alias_id)?
|
||||
.unwrap();
|
||||
|
||||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(user_id, &admin_room_id)
|
||||
if let Some(admin_room_id) = services().admin.get_admin_room()? {
|
||||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(user_id, &admin_room_id)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new user account on this homeserver.
|
||||
|
@ -596,6 +592,16 @@ impl Service {
    ) -> Result<Option<FilterDefinition>> {
        self.db.get_filter(user_id, filter_id)
    }

    // Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
    pub fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> {
        self.db.create_openid_token(user_id)
    }

    /// Find out which user an OpenID access token belongs to.
    pub fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>> {
        self.db.find_from_openid_token(token)
    }
}
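For context on how the two OpenID methods pair up (the handler below is a hypothetical sketch, not code from this diff): `create_openid_token` mints the token a client hands to an integration, and the integration's request is later resolved back to a user with `find_from_openid_token`, for example when serving the federation OpenID userinfo endpoint.

// Hypothetical sketch: resolve an OpenID access token presented by an integration
// back to the Matrix user it was minted for.
fn openid_userinfo(token: &str) -> Result<OwnedUserId> {
    services()
        .users
        .find_from_openid_token(token)?
        .ok_or_else(|| Error::BadRequest(ErrorKind::forbidden(), "OpenID token is unrecognized"))
}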
|
||||
|
||||
/// Ensure that a user only sees signatures from themselves and the target user
|
||||
|
|
|
@ -87,6 +87,10 @@ pub enum Error {
    PathError(#[from] axum::extract::rejection::PathRejection),
    #[error("{0}")]
    AdminCommand(&'static str),
    #[error("from {0}: {1}")]
    RedactionError(OwnedServerName, ruma::canonical_json::RedactionError),
    #[error("{0} in {1}")]
    InconsistentRoomState(&'static str, ruma::OwnedRoomId),
}

impl Error {

@ -124,7 +128,7 @@ impl Error {
                kind.clone(),
                match kind {
                    WrongRoomKeysVersion { .. }
                    | Forbidden
                    | Forbidden { .. }
                    | GuestAccessForbidden
                    | ThreepidAuthFailed
                    | ThreepidDenied => StatusCode::FORBIDDEN,
@ -122,16 +122,14 @@ pub fn deserialize_from_str<
    'de,
    D: serde::de::Deserializer<'de>,
    T: FromStr<Err = E>,
    E: std::fmt::Display,
    E: fmt::Display,
>(
    deserializer: D,
) -> Result<T, D::Error> {
    struct Visitor<T: FromStr<Err = E>, E>(std::marker::PhantomData<T>);
    impl<'de, T: FromStr<Err = Err>, Err: std::fmt::Display> serde::de::Visitor<'de>
        for Visitor<T, Err>
    {
    impl<'de, T: FromStr<Err = Err>, Err: fmt::Display> serde::de::Visitor<'de> for Visitor<T, Err> {
        type Value = T;
        fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(formatter, "a parsable string")
        }
        fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
24 taplo.toml Normal file
@ -0,0 +1,24 @@
exclude = [".**/*.toml"]
include = ["**/*.toml"]
[formatting]
reorder_arrays = true
reorder_keys = true

# Prevent breaking command and argument order
[[rule]]
include = ["engage.toml"]
# https://github.com/tamasfe/taplo/issues/608
#keys = ["interpreter"]

[rule.formatting]
reorder_arrays = false

# Prevent breaking license file order
[[rule]]
include = ["Cargo.toml"]
# https://github.com/tamasfe/taplo/issues/608
# keys = ["package.metadata.deb.license-file", "package.metadata.deb.assets"]
keys = ["package.metadata.deb", "package.metadata.deb.assets"]

[rule.formatting]
reorder_arrays = false
@ -7,9 +7,9 @@ server_name = "localhost"
database_path = "/tmp"

# All the other settings are left at their defaults:
port = 6167
max_request_size = 20_000_000
allow_registration = true
trusted_servers = ["matrix.org"]
address = "127.0.0.1"
proxy = "none"
allow_registration = true
max_request_size = 20_000_000
port = 6167
proxy = "none"
trusted_servers = ["matrix.org"]