Compare commits
382 commits
141 changed files with 8744 additions and 5558 deletions
.editorconfig (new file, 15 lines)
@@ -0,0 +1,15 @@
# EditorConfig is awesome: https://EditorConfig.org

root = true

[*]
charset = utf-8
end_of_line = lf
tab_width = 4
indent_size = 4
indent_style = space
insert_final_newline = true
max_line_length = 120

[*.nix]
indent_size = 2
.envrc (4 lines changed)
@@ -1 +1,5 @@
#!/usr/bin/env bash

use flake

PATH_add bin
.gitignore (vendored, 6 lines changed)
@@ -68,3 +68,9 @@ cached_target

# Direnv cache
/.direnv

# Gitlab CI cache
/.gitlab-ci.d

# mdbook output
public/
.gitlab-ci.yml (423 lines changed)
@@ -1,244 +1,197 @@
|
|||
stages:
|
||||
- build
|
||||
- build docker image
|
||||
- test
|
||||
- upload artifacts
|
||||
- ci
|
||||
- artifacts
|
||||
- publish
|
||||
|
||||
variables:
|
||||
# Make GitLab CI go fast:
|
||||
GIT_SUBMODULE_STRATEGY: recursive
|
||||
FF_USE_FASTZIP: 1
|
||||
CACHE_COMPRESSION_LEVEL: fastest
|
||||
|
||||
# --------------------------------------------------------------------- #
|
||||
# Create and publish docker image #
|
||||
# --------------------------------------------------------------------- #
|
||||
|
||||
.docker-shared-settings:
|
||||
stage: "build docker image"
|
||||
needs: []
|
||||
tags: [ "docker" ]
|
||||
variables:
|
||||
# Docker in Docker:
|
||||
DOCKER_BUILDKIT: 1
|
||||
image:
|
||||
name: docker.io/docker
|
||||
services:
|
||||
- name: docker.io/docker:dind
|
||||
alias: docker
|
||||
script:
|
||||
- apk add openssh-client
|
||||
- eval $(ssh-agent -s)
|
||||
- mkdir -p ~/.ssh && chmod 700 ~/.ssh
|
||||
- printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config
|
||||
- sh .gitlab/setup-buildx-remote-builders.sh
|
||||
# Authorize against this project's own image registry:
|
||||
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
|
||||
# Build multiplatform image and push to temporary tag:
|
||||
- >
|
||||
docker buildx build
|
||||
--platform "linux/arm/v7,linux/arm64,linux/amd64"
|
||||
--pull
|
||||
--tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
|
||||
--push
|
||||
--provenance=false
|
||||
--file "Dockerfile" .
|
||||
# Build multiplatform image to deb stage and extract their .deb files:
|
||||
- >
|
||||
docker buildx build
|
||||
--platform "linux/arm/v7,linux/arm64,linux/amd64"
|
||||
--target "packager-result"
|
||||
--output="type=local,dest=/tmp/build-output"
|
||||
--provenance=false
|
||||
--file "Dockerfile" .
|
||||
# Build multiplatform image to binary stage and extract their binaries:
|
||||
- >
|
||||
docker buildx build
|
||||
--platform "linux/arm/v7,linux/arm64,linux/amd64"
|
||||
--target "builder-result"
|
||||
--output="type=local,dest=/tmp/build-output"
|
||||
--provenance=false
|
||||
--file "Dockerfile" .
|
||||
# Copy to GitLab container registry:
|
||||
- >
|
||||
docker buildx imagetools create
|
||||
--tag "$CI_REGISTRY_IMAGE/$TAG"
|
||||
--tag "$CI_REGISTRY_IMAGE/$TAG-bullseye"
|
||||
--tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
|
||||
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
|
||||
# if DockerHub credentials exist, also copy to dockerhub:
|
||||
- if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
|
||||
- >
|
||||
if [ -n "${DOCKER_HUB}" ]; then
|
||||
docker buildx imagetools create
|
||||
--tag "$DOCKER_HUB_IMAGE/$TAG"
|
||||
--tag "$DOCKER_HUB_IMAGE/$TAG-bullseye"
|
||||
--tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
|
||||
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
|
||||
; fi
|
||||
- mv /tmp/build-output ./
|
||||
artifacts:
|
||||
paths:
|
||||
- "./build-output/"
|
||||
|
||||
docker:next:
|
||||
extends: .docker-shared-settings
|
||||
rules:
|
||||
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"'
|
||||
variables:
|
||||
TAG: "matrix-conduit:next"
|
||||
|
||||
docker:master:
|
||||
extends: .docker-shared-settings
|
||||
rules:
|
||||
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"'
|
||||
variables:
|
||||
TAG: "matrix-conduit:latest"
|
||||
|
||||
docker:tags:
|
||||
extends: .docker-shared-settings
|
||||
rules:
|
||||
- if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG"
|
||||
variables:
|
||||
TAG: "matrix-conduit:$CI_COMMIT_TAG"
|
||||
|
||||
|
||||
docker build debugging:
|
||||
extends: .docker-shared-settings
|
||||
rules:
|
||||
- if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/"
|
||||
variables:
|
||||
TAG: "matrix-conduit-docker-tests:latest"
|
||||
|
||||
# --------------------------------------------------------------------- #
|
||||
# Run tests #
|
||||
# --------------------------------------------------------------------- #
|
||||
|
||||
cargo check:
|
||||
stage: test
|
||||
image: docker.io/rust:1.70.0-bullseye
|
||||
needs: []
|
||||
interruptible: true
|
||||
before_script:
|
||||
- "rustup show && rustc --version && cargo --version" # Print version info for debugging
|
||||
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
|
||||
script:
|
||||
- cargo check
|
||||
|
||||
|
||||
.test-shared-settings:
|
||||
stage: "test"
|
||||
needs: []
|
||||
image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
|
||||
tags: ["docker"]
|
||||
variables:
|
||||
CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
|
||||
interruptible: true
|
||||
|
||||
test:cargo:
|
||||
extends: .test-shared-settings
|
||||
before_script:
|
||||
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
|
||||
script:
|
||||
- rustc --version && cargo --version # Print version info for debugging
|
||||
- "cargo test --color always --workspace --verbose --locked --no-fail-fast"
|
||||
|
||||
test:clippy:
|
||||
extends: .test-shared-settings
|
||||
allow_failure: true
|
||||
before_script:
|
||||
- rustup component add clippy
|
||||
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
|
||||
script:
|
||||
- rustc --version && cargo --version # Print version info for debugging
|
||||
- "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
|
||||
artifacts:
|
||||
when: always
|
||||
reports:
|
||||
codequality: gl-code-quality-report.json
|
||||
|
||||
test:format:
|
||||
extends: .test-shared-settings
|
||||
before_script:
|
||||
- rustup component add rustfmt
|
||||
script:
|
||||
- cargo fmt --all -- --check
|
||||
|
||||
test:audit:
|
||||
extends: .test-shared-settings
|
||||
allow_failure: true
|
||||
script:
|
||||
- cargo audit --color always || true
|
||||
- cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json
|
||||
artifacts:
|
||||
when: always
|
||||
reports:
|
||||
sast: gl-sast-report.json
|
||||
|
||||
test:dockerlint:
|
||||
stage: "test"
|
||||
needs: []
|
||||
image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine
|
||||
interruptible: true
|
||||
script:
|
||||
- hadolint --version
|
||||
# First pass: Print for CI log:
|
||||
- >
|
||||
hadolint
|
||||
--no-fail --verbose
|
||||
./Dockerfile
|
||||
# Then output the results into a json for GitLab to pretty-print this in the MR:
|
||||
- >
|
||||
hadolint
|
||||
--format gitlab_codeclimate
|
||||
--failure-threshold error
|
||||
./Dockerfile > dockerlint.json
|
||||
artifacts:
|
||||
when: always
|
||||
reports:
|
||||
codequality: dockerlint.json
|
||||
paths:
|
||||
- dockerlint.json
|
||||
rules:
|
||||
- if: '$CI_COMMIT_REF_NAME != "master"'
|
||||
changes:
|
||||
- docker/*Dockerfile
|
||||
- Dockerfile
|
||||
- .gitlab-ci.yml
|
||||
- if: '$CI_COMMIT_REF_NAME == "master"'
|
||||
- if: '$CI_COMMIT_REF_NAME == "next"'
|
||||
|
||||
# --------------------------------------------------------------------- #
|
||||
# Store binaries as package so they have download urls #
|
||||
# --------------------------------------------------------------------- #
|
||||
|
||||
# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME:
|
||||
|
||||
#publish:package:
|
||||
# stage: "upload artifacts"
|
||||
# needs:
|
||||
# - "docker:tags"
|
||||
# rules:
|
||||
# - if: "$CI_COMMIT_TAG"
|
||||
# image: curlimages/curl:latest
|
||||
# tags: ["docker"]
|
||||
# variables:
|
||||
# GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
|
||||
# script:
|
||||
# - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
|
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
|
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"'
|
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
|
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
|
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"'
|
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"'
|
||||
# Makes some things print in color
|
||||
TERM: ansi
|
||||
# Faster cache and artifact compression / decompression
|
||||
FF_USE_FASTZIP: true
|
||||
# Print progress reports for cache and artifact transfers
|
||||
TRANSFER_METER_FREQUENCY: 5s
|
||||
|
||||
# Avoid duplicate pipelines
|
||||
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
|
||||
workflow:
|
||||
rules:
|
||||
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
|
||||
- if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
|
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
|
||||
when: never
|
||||
- if: "$CI_COMMIT_BRANCH"
|
||||
- if: "$CI_COMMIT_TAG"
|
||||
- if: $CI
|
||||
|
||||
before_script:
|
||||
# Enable nix-command and flakes
|
||||
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Add our own binary cache
|
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://attic.conduit.rs/conduit" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ddcaWZiWm0l0IXZlO8FERRdWvEufwmd0Negl1P+c0Ns=" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Add alternate binary cache
|
||||
- if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Add crane binary cache
|
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Add nix-community binary cache
|
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Install direnv and nix-direnv
|
||||
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
|
||||
|
||||
# Allow .envrc
|
||||
- if command -v nix > /dev/null; then direnv allow; fi
|
||||
|
||||
# Set CARGO_HOME to a cacheable path
|
||||
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
|
||||
|
||||
# Cache attic client
|
||||
- if command -v nix > /dev/null; then ./bin/nix-build-and-cache --inputs-from . attic; fi
|
||||
|
||||
ci:
|
||||
stage: ci
|
||||
image: nixos/nix:2.22.0
|
||||
script:
|
||||
# Cache the inputs required for the devShell
|
||||
- ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation
|
||||
|
||||
- direnv exec . engage
|
||||
cache:
|
||||
key: nix
|
||||
paths:
|
||||
- target
|
||||
- .gitlab-ci.d
|
||||
rules:
|
||||
# CI on upstream runners (only available for maintainers)
|
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true"
|
||||
# Manual CI on unprotected branches that are not MRs
|
||||
- if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false"
|
||||
when: manual
|
||||
# Manual CI on forks
|
||||
- if: $IS_UPSTREAM_CI != "true"
|
||||
when: manual
|
||||
- if: $CI
|
||||
interruptible: true
|
||||
|
||||
artifacts:
|
||||
stage: artifacts
|
||||
image: nixos/nix:2.22.0
|
||||
script:
|
||||
- ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
|
||||
- cp result/bin/conduit x86_64-unknown-linux-musl
|
||||
|
||||
- mkdir -p target/release
|
||||
- cp result/bin/conduit target/release
|
||||
- direnv exec . cargo deb --no-build
|
||||
- mv target/debian/*.deb x86_64-unknown-linux-musl.deb
|
||||
|
||||
# Since the OCI image package is based on the binary package, this has the
|
||||
# fun side effect of uploading the normal binary too. Conduit users who are
|
||||
# deploying with Nix can leverage this fact by adding our binary cache to
|
||||
# their systems.
|
||||
#
|
||||
# Note that although we have an `oci-image-x86_64-unknown-linux-musl`
|
||||
# output, we don't build it because it would be largely redundant to this
|
||||
# one since it's all containerized anyway.
|
||||
- ./bin/nix-build-and-cache .#oci-image
|
||||
- cp result oci-image-amd64.tar.gz
|
||||
|
||||
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
|
||||
- cp result/bin/conduit aarch64-unknown-linux-musl
|
||||
|
||||
- mkdir -p target/aarch64-unknown-linux-musl/release
|
||||
- cp result/bin/conduit target/aarch64-unknown-linux-musl/release
|
||||
- direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl
|
||||
- mv target/aarch64-unknown-linux-musl/debian/*.deb aarch64-unknown-linux-musl.deb
|
||||
|
||||
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
|
||||
- cp result oci-image-arm64v8.tar.gz
|
||||
|
||||
- ./bin/nix-build-and-cache .#book
|
||||
# We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746
|
||||
- cp -r --dereference result public
|
||||
artifacts:
|
||||
paths:
|
||||
- x86_64-unknown-linux-musl
|
||||
- aarch64-unknown-linux-musl
|
||||
- x86_64-unknown-linux-musl.deb
|
||||
- aarch64-unknown-linux-musl.deb
|
||||
- oci-image-amd64.tar.gz
|
||||
- oci-image-arm64v8.tar.gz
|
||||
- public
|
||||
rules:
|
||||
# CI required for all MRs
|
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
# Optional CI on forks
|
||||
- if: $IS_UPSTREAM_CI != "true"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: $CI
|
||||
interruptible: true
|
||||
|
||||
.push-oci-image:
|
||||
stage: publish
|
||||
image: docker:25.0.0
|
||||
services:
|
||||
- docker:25.0.0-dind
|
||||
variables:
|
||||
IMAGE_SUFFIX_AMD64: amd64
|
||||
IMAGE_SUFFIX_ARM64V8: arm64v8
|
||||
script:
|
||||
- docker load -i oci-image-amd64.tar.gz
|
||||
- IMAGE_ID_AMD64=$(docker images -q conduit:next)
|
||||
- docker load -i oci-image-arm64v8.tar.gz
|
||||
- IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
|
||||
# Tag and push the architecture specific images
|
||||
- docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
||||
- docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
||||
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||
# Tag the multi-arch image
|
||||
- docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||
- docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
|
||||
# Tag and push the git ref
|
||||
- docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||
- docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
|
||||
# Tag git tags as 'latest'
|
||||
- |
|
||||
if [[ -n "$CI_COMMIT_TAG" ]]; then
|
||||
docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||
docker manifest push $IMAGE_NAME:latest
|
||||
fi
|
||||
dependencies:
|
||||
- artifacts
|
||||
only:
|
||||
- next
|
||||
- master
|
||||
- tags
|
||||
|
||||
oci-image:push-gitlab:
|
||||
extends: .push-oci-image
|
||||
variables:
|
||||
IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
|
||||
before_script:
|
||||
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
|
||||
|
||||
oci-image:push-dockerhub:
|
||||
extends: .push-oci-image
|
||||
variables:
|
||||
IMAGE_NAME: matrixconduit/matrix-conduit
|
||||
before_script:
|
||||
- docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD
|
||||
|
||||
pages:
|
||||
stage: publish
|
||||
dependencies:
|
||||
- artifacts
|
||||
only:
|
||||
- next
|
||||
script:
|
||||
- "true"
|
||||
artifacts:
|
||||
paths:
|
||||
- public
|
||||
|
|
.gitlab/route-map.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
# Docs: Map markdown to html files
- source: /docs/(.+)\.md/
  public: '\1.html'
Cargo.lock (generated, 2416 lines changed): file diff suppressed because it is too large.
Cargo.toml (216 lines changed)
@@ -1,34 +1,51 @@
|
|||
[package]
|
||||
name = "conduit"
|
||||
description = "A Matrix homeserver written in Rust"
|
||||
license = "Apache-2.0"
|
||||
authors = ["timokoesters <timo@koesters.xyz>"]
|
||||
homepage = "https://conduit.rs"
|
||||
repository = "https://gitlab.com/famedly/conduit"
|
||||
readme = "README.md"
|
||||
version = "0.7.0-alpha"
|
||||
edition = "2021"
|
||||
[workspace.lints.rust]
|
||||
explicit_outlives_requirements = "warn"
|
||||
unused_qualifications = "warn"
|
||||
|
||||
# When changing this, make sure to update the `flake.lock` file by running
|
||||
# `nix flake update`. If you don't have Nix installed or otherwise don't know
|
||||
# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in
|
||||
# the matrix room.
|
||||
rust-version = "1.70.0"
|
||||
[workspace.lints.clippy]
|
||||
cloned_instead_of_copied = "warn"
|
||||
dbg_macro = "warn"
|
||||
str_to_string = "warn"
|
||||
|
||||
[package]
|
||||
authors = ["timokoesters <timo@koesters.xyz>"]
|
||||
description = "A Matrix homeserver written in Rust"
|
||||
edition = "2021"
|
||||
homepage = "https://conduit.rs"
|
||||
license = "Apache-2.0"
|
||||
name = "conduit"
|
||||
readme = "README.md"
|
||||
repository = "https://gitlab.com/famedly/conduit"
|
||||
version = "0.10.0-alpha"
|
||||
|
||||
# See also `rust-toolchain.toml`
|
||||
rust-version = "1.79.0"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Web framework
|
||||
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
|
||||
axum-server = { version = "0.5.1", features = ["tls-rustls"] }
|
||||
axum = { version = "0.7", default-features = false, features = [
|
||||
"form",
|
||||
"http1",
|
||||
"http2",
|
||||
"json",
|
||||
"matched-path",
|
||||
], optional = true }
|
||||
axum-extra = { version = "0.9", features = ["typed-header"] }
|
||||
axum-server = { version = "0.6", features = ["tls-rustls"] }
|
||||
tower = { version = "0.4.13", features = ["util"] }
|
||||
tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] }
|
||||
|
||||
# Used for matrix spec type definitions and helpers
|
||||
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||
ruma = { git = "https://github.com/ruma/ruma", rev = "b4853aa8fa5e3a24e3689fc88044de9915f6ab67", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
||||
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
||||
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
||||
tower-http = { version = "0.5", features = [
|
||||
"add-extension",
|
||||
"cors",
|
||||
"sensitive-headers",
|
||||
"trace",
|
||||
"util",
|
||||
] }
|
||||
tower-service = "0.3"
|
||||
|
||||
# Async runtime and utilities
|
||||
tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }
|
||||
|
@ -39,9 +56,9 @@ persy = { version = "1.4.4", optional = true, features = ["background_ops"] }
|
|||
|
||||
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
||||
bytes = "1.4.0"
|
||||
http = "0.2.9"
|
||||
http = "1"
|
||||
# Used to find data directory for default db path
|
||||
directories = "4.0.1"
|
||||
directories = "5"
|
||||
# Used for ruma wrapper
|
||||
serde_json = { version = "1.0.96", features = ["raw_value"] }
|
||||
# Used for appservice registration files
|
||||
|
@ -51,72 +68,123 @@ serde = { version = "1.0.163", features = ["rc"] }
|
|||
# Used for secure identifiers
|
||||
rand = "0.8.5"
|
||||
# Used to hash passwords
|
||||
rust-argon2 = "1.0.0"
|
||||
rust-argon2 = "2"
|
||||
# Used to send requests
|
||||
reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
|
||||
hyper = "1.1"
|
||||
hyper-util = { version = "0.1", features = [
|
||||
"client",
|
||||
"client-legacy",
|
||||
"http1",
|
||||
"http2",
|
||||
] }
|
||||
reqwest = { version = "0.12", default-features = false, features = [
|
||||
"rustls-tls-native-roots",
|
||||
"socks",
|
||||
] }
|
||||
# Used for conduit::Error type
|
||||
thiserror = "1.0.40"
|
||||
# Used to generate thumbnails for images
|
||||
image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] }
|
||||
image = { version = "0.25", default-features = false, features = [
|
||||
"gif",
|
||||
"jpeg",
|
||||
"png",
|
||||
] }
|
||||
# Used to encode server public key
|
||||
base64 = "0.21.2"
|
||||
base64 = "0.22"
|
||||
# Used when hashing the state
|
||||
ring = "0.16.20"
|
||||
ring = "0.17.7"
|
||||
# Used when querying the SRV record of other servers
|
||||
trust-dns-resolver = "0.22.0"
|
||||
hickory-resolver = "0.24"
|
||||
# Used to find matching events for appservices
|
||||
regex = "1.8.1"
|
||||
# jwt jsonwebtokens
|
||||
jsonwebtoken = "8.3.0"
|
||||
jsonwebtoken = "9.2.0"
|
||||
# Performance measurements
|
||||
tracing = { version = "0.1.37", features = [] }
|
||||
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
|
||||
opentelemetry = "0.22"
|
||||
opentelemetry-jaeger-propagator = "0.1"
|
||||
opentelemetry-otlp = "0.15"
|
||||
opentelemetry_sdk = { version = "0.22", features = ["rt-tokio"] }
|
||||
tracing = "0.1.37"
|
||||
tracing-flame = "0.2.0"
|
||||
opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
|
||||
opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
|
||||
tracing-opentelemetry = "0.18.0"
|
||||
tracing-opentelemetry = "0.23"
|
||||
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
|
||||
|
||||
lru-cache = "0.1.2"
|
||||
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
|
||||
parking_lot = { version = "0.12.1", optional = true }
|
||||
crossbeam = { version = "0.8.2", optional = true }
|
||||
rusqlite = { version = "0.31", optional = true, features = ["bundled"] }
|
||||
|
||||
# crossbeam = { version = "0.8.2", optional = true }
|
||||
num_cpus = "1.15.0"
|
||||
threadpool = "1.8.1"
|
||||
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
||||
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
||||
# Used for ruma wrapper
|
||||
serde_html_form = "0.2.0"
|
||||
|
||||
rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }
|
||||
|
||||
thread_local = "1.1.7"
|
||||
# used for TURN server authentication
|
||||
hmac = "0.12.1"
|
||||
sha-1 = "0.10.1"
|
||||
# used for conduit's CLI and admin room command parsing
|
||||
clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
|
||||
clap = { version = "4.3.0", default-features = false, features = [
|
||||
"derive",
|
||||
"error-context",
|
||||
"help",
|
||||
"std",
|
||||
"string",
|
||||
"usage",
|
||||
] }
|
||||
futures-util = { version = "0.3.28", default-features = false }
|
||||
# Used for reading the configuration from conduit.toml & environment variables
|
||||
figment = { version = "0.10.8", features = ["env", "toml"] }
|
||||
|
||||
tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true }
|
||||
tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
|
||||
lazy_static = "1.4.0"
|
||||
# Validating urls in config
|
||||
url = { version = "2", features = ["serde"] }
|
||||
|
||||
async-trait = "0.1.68"
|
||||
tikv-jemallocator = { version = "0.5.0", features = [
|
||||
"unprefixed_malloc_on_supported_platforms",
|
||||
], optional = true }
|
||||
|
||||
sd-notify = { version = "0.4.1", optional = true }
|
||||
|
||||
# Used for matrix spec type definitions and helpers
|
||||
[dependencies.ruma]
|
||||
features = [
|
||||
"appservice-api-c",
|
||||
"client-api",
|
||||
"compat",
|
||||
"federation-api",
|
||||
"push-gateway-api-c",
|
||||
"rand",
|
||||
"ring-compat",
|
||||
"server-util",
|
||||
"state-res",
|
||||
"unstable-exhaustive-types",
|
||||
"unstable-msc2448",
|
||||
"unstable-msc3575",
|
||||
"unstable-unspecified",
|
||||
]
|
||||
git = "https://github.com/ruma/ruma"
|
||||
|
||||
[dependencies.rocksdb]
|
||||
features = ["lz4", "multi-threaded-cf", "zstd"]
|
||||
optional = true
|
||||
package = "rust-rocksdb"
|
||||
version = "0.25"
|
||||
|
||||
[target.'cfg(unix)'.dependencies]
|
||||
nix = { version = "0.26.2", features = ["resource"] }
|
||||
nix = { version = "0.28", features = ["resource"] }
|
||||
|
||||
[features]
|
||||
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
|
||||
default = ["backend_rocksdb", "backend_sqlite", "conduit_bin", "systemd"]
|
||||
#backend_sled = ["sled"]
|
||||
backend_persy = ["persy", "parking_lot"]
|
||||
backend_persy = ["parking_lot", "persy"]
|
||||
backend_sqlite = ["sqlite"]
|
||||
backend_heed = ["heed", "crossbeam"]
|
||||
#backend_heed = ["heed", "crossbeam"]
|
||||
backend_rocksdb = ["rocksdb"]
|
||||
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
|
||||
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
|
||||
conduit_bin = ["axum"]
|
||||
jemalloc = ["tikv-jemallocator"]
|
||||
sqlite = ["parking_lot", "rusqlite", "tokio/signal"]
|
||||
systemd = ["sd-notify"]
|
||||
|
||||
[[bin]]
|
||||
|
@ -129,35 +197,45 @@ name = "conduit"
|
|||
path = "src/lib.rs"
|
||||
|
||||
[package.metadata.deb]
|
||||
name = "matrix-conduit"
|
||||
maintainer = "Paul van Tilburg <paul@luon.net>"
|
||||
assets = [
|
||||
[
|
||||
"README.md",
|
||||
"usr/share/doc/matrix-conduit/",
|
||||
"644",
|
||||
],
|
||||
[
|
||||
"debian/README.md",
|
||||
"usr/share/doc/matrix-conduit/README.Debian",
|
||||
"644",
|
||||
],
|
||||
[
|
||||
"target/release/conduit",
|
||||
"usr/sbin/matrix-conduit",
|
||||
"755",
|
||||
],
|
||||
]
|
||||
conf-files = ["/etc/matrix-conduit/conduit.toml"]
|
||||
copyright = "2020, Timo Kösters <timo@koesters.xyz>"
|
||||
license-file = ["LICENSE", "3"]
|
||||
depends = "$auto, ca-certificates"
|
||||
extended-description = """\
|
||||
A fast Matrix homeserver that is optimized for smaller, personal servers, \
|
||||
instead of a server that has high scalability."""
|
||||
section = "net"
|
||||
priority = "optional"
|
||||
assets = [
|
||||
["debian/README.md", "usr/share/doc/matrix-conduit/README.Debian", "644"],
|
||||
["README.md", "usr/share/doc/matrix-conduit/", "644"],
|
||||
["target/release/conduit", "usr/sbin/matrix-conduit", "755"],
|
||||
]
|
||||
conf-files = [
|
||||
"/etc/matrix-conduit/conduit.toml"
|
||||
]
|
||||
license-file = ["LICENSE", "3"]
|
||||
maintainer = "Paul van Tilburg <paul@luon.net>"
|
||||
maintainer-scripts = "debian/"
|
||||
name = "matrix-conduit"
|
||||
priority = "optional"
|
||||
section = "net"
|
||||
systemd-units = { unit-name = "matrix-conduit" }
|
||||
|
||||
[profile.dev]
|
||||
lto = 'off'
|
||||
incremental = true
|
||||
lto = 'off'
|
||||
|
||||
[profile.release]
|
||||
lto = 'thin'
|
||||
incremental = true
|
||||
codegen-units = 32
|
||||
incremental = true
|
||||
lto = 'thin'
|
||||
# If you want to make flamegraphs, enable debug info:
|
||||
# debug = true
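The dependency comments near the top of this manifest ask for `flake.lock` to be refreshed whenever the dependency set changes. A small sketch of the usual follow-up commands after editing Cargo.toml, assuming a flakes-enabled Nix install is available:

```bash
# Re-resolve Cargo dependencies after editing Cargo.toml
cargo update

# Refresh flake.lock, as requested by the comment in this manifest
nix flake update
```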
|
||||
|
||||
|
|
Dockerfile (deleted, 132 lines)
@@ -1,132 +0,0 @@
|
|||
# syntax=docker/dockerfile:1
|
||||
FROM docker.io/rust:1.70-bullseye AS base
|
||||
|
||||
FROM base AS builder
|
||||
WORKDIR /usr/src/conduit
|
||||
|
||||
# Install required packages to build Conduit and it's dependencies
|
||||
RUN apt-get update && \
|
||||
apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5
|
||||
|
||||
# == Build dependencies without our own code separately for caching ==
|
||||
#
|
||||
# Need a fake main.rs since Cargo refuses to build anything otherwise.
|
||||
#
|
||||
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
|
||||
# request that would allow just dependencies to be compiled, presumably
|
||||
# regardless of whether source files are available.
|
||||
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
RUN cargo build --release && rm -r src
|
||||
|
||||
# Copy over actual Conduit sources
|
||||
COPY src src
|
||||
|
||||
# main.rs and lib.rs need their timestamp updated for this to work correctly since
|
||||
# otherwise the build with the fake main.rs from above is newer than the
|
||||
# source files (COPY preserves timestamps).
|
||||
#
|
||||
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
|
||||
RUN touch src/main.rs && touch src/lib.rs && cargo build --release
|
||||
|
||||
|
||||
# ONLY USEFUL FOR CI: target stage to extract build artifacts
|
||||
FROM scratch AS builder-result
|
||||
COPY --from=builder /usr/src/conduit/target/release/conduit /conduit
|
||||
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------------------------------------------
|
||||
# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems:
|
||||
# ---------------------------------------------------------------------------------------------------------------
|
||||
FROM base AS build-cargo-deb
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
dpkg \
|
||||
dpkg-dev \
|
||||
liblzma-dev
|
||||
|
||||
RUN cargo install cargo-deb
|
||||
# => binary is in /usr/local/cargo/bin/cargo-deb
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------------------------------------------
|
||||
# Package conduit build-result into a .deb package:
|
||||
# ---------------------------------------------------------------------------------------------------------------
|
||||
FROM builder AS packager
|
||||
WORKDIR /usr/src/conduit
|
||||
|
||||
COPY ./LICENSE ./LICENSE
|
||||
COPY ./README.md ./README.md
|
||||
COPY debian ./debian
|
||||
COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb
|
||||
|
||||
# --no-build makes cargo-deb reuse already compiled project
|
||||
RUN cargo deb --no-build
|
||||
# => Package is in /usr/src/conduit/target/debian/<project_name>_<version>_<arch>.deb
|
||||
|
||||
|
||||
# ONLY USEFUL FOR CI: target stage to extract build artifacts
|
||||
FROM scratch AS packager-result
|
||||
COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------------------------------------------
|
||||
# Stuff below this line actually ends up in the resulting docker image
|
||||
# ---------------------------------------------------------------------------------------------------------------
|
||||
FROM docker.io/debian:bullseye-slim AS runner
|
||||
|
||||
# Standard port on which Conduit launches.
|
||||
# You still need to map the port when using the docker command or docker-compose.
|
||||
EXPOSE 6167
|
||||
|
||||
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
|
||||
|
||||
ENV CONDUIT_PORT=6167 \
|
||||
CONDUIT_ADDRESS="0.0.0.0" \
|
||||
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
|
||||
CONDUIT_CONFIG=''
|
||||
# └─> Set no config file to do all configuration with env vars
|
||||
|
||||
# Conduit needs:
|
||||
# dpkg: to install conduit.deb
|
||||
# ca-certificates: for https
|
||||
# iproute2 & wget: for the healthcheck script
|
||||
RUN apt-get update && apt-get -y --no-install-recommends install \
|
||||
dpkg \
|
||||
ca-certificates \
|
||||
iproute2 \
|
||||
wget \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Test if Conduit is still alive, uses the same endpoint as Element
|
||||
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
|
||||
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
|
||||
|
||||
# Install conduit.deb:
|
||||
COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/
|
||||
RUN dpkg -i /srv/conduit/*.deb
|
||||
|
||||
# Improve security: Don't run stuff as root, that does not need to run as root
|
||||
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
|
||||
ARG USER_ID=1000
|
||||
ARG GROUP_ID=1000
|
||||
RUN set -x ; \
|
||||
groupadd -r -g ${GROUP_ID} conduit ; \
|
||||
useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1
|
||||
|
||||
# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable:
|
||||
RUN chown -cR conduit:conduit /srv/conduit && \
|
||||
chmod +x /srv/conduit/healthcheck.sh && \
|
||||
mkdir -p ${DEFAULT_DB_PATH} && \
|
||||
chown -cR conduit:conduit ${DEFAULT_DB_PATH}
|
||||
|
||||
# Change user to conduit, no root permissions afterwards:
|
||||
USER conduit
|
||||
# Set container home directory
|
||||
WORKDIR /srv/conduit
|
||||
|
||||
# Run Conduit and print backtraces on panics
|
||||
ENV RUST_BACKTRACE=1
|
||||
ENTRYPOINT [ "/usr/sbin/matrix-conduit" ]
|
README.md (67 lines changed)
@@ -1,7 +1,15 @@
|
|||
# Conduit
|
||||
### A Matrix homeserver written in Rust
|
||||
|
||||
<!-- ANCHOR: catchphrase -->
|
||||
### A Matrix homeserver written in Rust
|
||||
<!-- ANCHOR_END: catchphrase -->
|
||||
|
||||
Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information.
|
||||
Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository.
|
||||
|
||||
<!-- ANCHOR: body -->
|
||||
#### What is Matrix?
|
||||
|
||||
[Matrix](https://matrix.org) is an open network for secure and decentralized
|
||||
communication. Users from every Matrix homeserver can chat with users from all
|
||||
other Matrix servers. You can even use bridges (also called Matrix appservices)
|
||||
|
@ -15,8 +23,7 @@ friends or company.
|
|||
|
||||
#### Can I try it out?
|
||||
|
||||
Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
|
||||
example) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information.
|
||||
Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login.
|
||||
|
||||
Server hosting for conduit.rs is donated by the Matrix.org Foundation.
|
||||
|
||||
|
@ -30,27 +37,32 @@ There are still a few important features missing:
|
|||
|
||||
- E2EE emoji comparison over federation (E2EE chat works)
|
||||
- Outgoing read receipts, typing, presence over federation (incoming works)
|
||||
<!-- ANCHOR_END: body -->
|
||||
|
||||
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
|
||||
|
||||
#### How can I deploy my own?
|
||||
|
||||
- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
|
||||
- Debian package: [debian/README.md](debian/README.md)
|
||||
- Nix/NixOS: [nix/README.md](nix/README.md)
|
||||
- Docker: [docker/README.md](docker/README.md)
|
||||
|
||||
If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).
|
||||
|
||||
<!-- ANCHOR: footer -->
|
||||
#### How can I contribute?
|
||||
|
||||
1. Look for an issue you would like to work on and make sure it's not assigned
|
||||
to other users
|
||||
2. Ask someone to assign the issue to you (comment on the issue or chat in
|
||||
[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org))
|
||||
3. Fork the repo and work on the issue.[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) is happy to help :)
|
||||
1. Look for an issue you would like to work on and make sure no one else is currently working on it.
|
||||
2. Tell us that you are working on the issue (comment on the issue or chat in
|
||||
[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions.
|
||||
3. Fork the repo, create a new branch and push commits.
|
||||
4. Submit a MR
|
||||
|
||||
#### Contact
|
||||
|
||||
If you have any questions, feel free to
|
||||
- Ask in `#conduit:fachschaften.org` on Matrix
|
||||
- Write an E-Mail to `conduit@koesters.xyz`
|
||||
- Send an direct message to `@timokoesters:fachschaften.org` on Matrix
|
||||
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
|
||||
|
||||
#### Security
|
||||
|
||||
If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs)
|
||||
and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to
|
||||
[conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before
|
||||
a fix is released publically.
|
||||
|
||||
#### Thanks to
|
||||
|
||||
Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
|
||||
|
@ -60,20 +72,13 @@ Thanks to the contributors to Conduit and all libraries we use, for example:
|
|||
- Ruma: A clean library for the Matrix Spec in Rust
|
||||
- axum: A modular web framework
|
||||
|
||||
#### Contact
|
||||
|
||||
If you run into any question, feel free to
|
||||
- Ask us in `#conduit:fachschaften.org` on Matrix
|
||||
- Write an E-Mail to `conduit@koesters.xyz`
|
||||
- Send an direct message to `timokoesters@fachschaften.org` on Matrix
|
||||
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
|
||||
|
||||
#### Donate
|
||||
|
||||
Liberapay: <https://liberapay.com/timokoesters/>\
|
||||
Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
|
||||
- Liberapay: <https://liberapay.com/timokoesters/>
|
||||
- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
|
||||
|
||||
#### Logo
|
||||
|
||||
Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \
|
||||
Logo License: https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md
|
||||
- Lightning Bolt Logo: <https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg>
|
||||
- Logo License: <https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md>
|
||||
<!-- ANCHOR_END: footer -->
|
||||
|
|
bin/complement (new executable file, 37 lines)
@@ -0,0 +1,37 @@
#!/usr/bin/env bash

set -euo pipefail

# Path to Complement's source code
COMPLEMENT_SRC="$1"

# A `.jsonl` file to write test logs to
LOG_FILE="$2"

# A `.jsonl` file to write test results to
RESULTS_FILE="$3"

OCI_IMAGE="complement-conduit:dev"

env \
    -C "$(git rev-parse --show-toplevel)" \
    docker build \
        --tag "$OCI_IMAGE" \
        --file complement/Dockerfile \
        .

# It's okay (likely, even) that `go test` exits nonzero
set +o pipefail
env \
    -C "$COMPLEMENT_SRC" \
    COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
    go test -json ./tests | tee "$LOG_FILE"
set -o pipefail

# Post-process the results into an easy-to-compare format
cat "$LOG_FILE" | jq -c '
    select(
        (.Action == "pass" or .Action == "fail" or .Action == "skip")
        and .Test != null
    ) | {Action: .Action, Test: .Test}
' | sort > "$RESULTS_FILE"
bin/nix-build-and-cache (new executable file, 40 lines)
@@ -0,0 +1,40 @@
#!/usr/bin/env bash

set -euo pipefail

# Build the installable and forward any other arguments too. Also, use
# nix-output-monitor instead if it's available.
if command -v nom &> /dev/null; then
    nom build "$@"
else
    nix build "$@"
fi

if [ ! -z ${ATTIC_TOKEN+x} ]; then
    nix run --inputs-from . attic -- \
        login \
        conduit \
        "${ATTIC_ENDPOINT:-https://attic.conduit.rs/conduit}" \
        "$ATTIC_TOKEN"

    readarray -t derivations < <(nix path-info "$@" --derivation)
    for derivation in "${derivations[@]}"; do
        cache+=(
            "$(nix-store --query --requisites --include-outputs "$derivation")"
        )
    done

    # Upload them to Attic
    #
    # Use `xargs` and a here-string because something would probably explode if
    # several thousand arguments got passed to a command at once. Hopefully no
    # store paths include a newline in them.
    (
        IFS=$'\n'
        nix shell --inputs-from . attic -c xargs \
            attic push conduit <<< "${cache[*]}"
    )

else
    echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
fi
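The CI jobs earlier in this diff call this helper with flake installables as arguments. A minimal sketch of a local invocation, assuming a flakes-enabled Nix install; uploading to the cache is skipped automatically while `ATTIC_TOKEN` is unset, and the token value below is a placeholder:

```bash
# Build one of the flake outputs that the CI jobs above reference.
# While ATTIC_TOKEN is unset, the script only builds and skips the upload.
./bin/nix-build-and-cache .#oci-image

# Hypothetical: with a token exported, the build's closure is also pushed to Attic.
ATTIC_TOKEN="<your-attic-token>" ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
```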
book.toml (new file, 21 lines)
@@ -0,0 +1,21 @@
[book]
description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol"
language = "en"
multilingual = false
src = "docs"
title = "Conduit"

[build]
build-dir = "public"
create-missing = true

[output.html]
edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}"
git-repository-icon = "fa-git-square"
git-repository-url = "https://gitlab.com/famedly/conduit"

[output.html.search]
limit-results = 15

[output.html.code.hidelines]
json = "~"
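This config drives mdbook, and the `build-dir = "public"` value lines up with the `public/` ignore entry and the `public` CI artifact added elsewhere in this diff. A quick local preview, assuming the `mdbook` CLI is installed:

```bash
# Render the docs/ sources into public/, as configured in book.toml
mdbook build

# Or serve them locally with live reload (http://localhost:3000 by default)
mdbook serve
```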
|
complement/Dockerfile
@@ -1,26 +1,30 @@
|
|||
# For use in our CI only. This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit
|
||||
FROM registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:commit-16a08e9b as builder
|
||||
#FROM rust:latest as builder
|
||||
FROM rust:1.79.0
|
||||
|
||||
WORKDIR /workdir
|
||||
|
||||
ARG RUSTC_WRAPPER
|
||||
ARG AWS_ACCESS_KEY_ID
|
||||
ARG AWS_SECRET_ACCESS_KEY
|
||||
ARG SCCACHE_BUCKET
|
||||
ARG SCCACHE_ENDPOINT
|
||||
ARG SCCACHE_S3_USE_SSL
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libclang-dev
|
||||
|
||||
COPY . .
|
||||
RUN mkdir -p target/release
|
||||
RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release
|
||||
|
||||
## Actual image
|
||||
FROM debian:bullseye
|
||||
WORKDIR /workdir
|
||||
COPY Cargo.toml Cargo.toml
|
||||
COPY Cargo.lock Cargo.lock
|
||||
COPY src src
|
||||
RUN cargo build --release \
|
||||
&& mv target/release/conduit conduit \
|
||||
&& rm -rf target
|
||||
|
||||
# Install caddy
|
||||
RUN apt-get update && apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-testing.list && apt-get update && apt-get install -y caddy
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
debian-keyring \
|
||||
debian-archive-keyring \
|
||||
apt-transport-https \
|
||||
curl \
|
||||
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
|
||||
| gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
|
||||
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
|
||||
| tee /etc/apt/sources.list.d/caddy-testing.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y caddy
|
||||
|
||||
COPY conduit-example.toml conduit.toml
|
||||
COPY complement/caddy.json caddy.json
|
||||
|
@ -29,16 +33,9 @@ ENV SERVER_NAME=localhost
|
|||
ENV CONDUIT_CONFIG=/workdir/conduit.toml
|
||||
|
||||
RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
|
||||
RUN echo "allow_federation = true" >> conduit.toml
|
||||
RUN echo "allow_check_for_updates = true" >> conduit.toml
|
||||
RUN echo "allow_encryption = true" >> conduit.toml
|
||||
RUN echo "allow_registration = true" >> conduit.toml
|
||||
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
|
||||
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
|
||||
|
||||
COPY --from=builder /workdir/target/release/conduit /workdir/conduit
|
||||
RUN chmod +x /workdir/conduit
|
||||
|
||||
EXPOSE 8008 8448
|
||||
|
||||
CMD uname -a && \
|
||||
|
|
|
complement/README.md
@@ -1,13 +1,11 @@
# Running Conduit on Complement
# Complement

This assumes that you're familiar with complement, if not, please readme
[their readme](https://github.com/matrix-org/complement#running).
## What's that?

Complement works with "base images", this directory (and Dockerfile) helps build the conduit complement-ready docker
image.
Have a look at [its repository](https://github.com/matrix-org/complement).

To build, `cd` to the base directory of the workspace, and run this:
## How do I use it with Conduit?

`docker build -t complement-conduit:dev -f complement/Dockerfile .`

Then use `complement-conduit:dev` as a base image for running complement tests.
The script at [`../bin/complement`](../bin/complement) has automation for this.
It takes a few command line arguments, you can read the script to find out what
those are.
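For example, a run against a local Complement checkout might look like this; the three arguments map to the positional parameters read at the top of `bin/complement`, and the paths and file names here are placeholders:

```bash
# arg 1: path to a local Complement checkout (placeholder)
# arg 2: .jsonl file that receives the raw `go test -json` output
# arg 3: .jsonl file that receives the sorted pass/fail/skip summary
./bin/complement ~/src/complement complement-log.jsonl complement-results.jsonl
```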
|
||||
|
|
|
@@ -17,14 +17,14 @@
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
# and
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
# for more information
# for more information, or continue below to see how conduit can do this for you.

# YOU NEED TO EDIT THIS
#server_name = "your.server.name"

database_backend = "rocksdb"
# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/"
database_backend = "rocksdb"

# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port

@@ -38,8 +38,14 @@ max_request_size = 20_000_000 # in bytes
# Enables registration. If set to false, no users can register on this server.
allow_registration = true

allow_federation = true
# A static registration token that new users will have to provide when creating
# an account. YOU NEED TO EDIT THIS.
# - Insert a password that users will have to enter on registration
# - Start the line with '#' to remove the condition
registration_token = ""

allow_check_for_updates = true
allow_federation = true

# Enable the display name lightning bolt on registration.
enable_lightning_bolt = true

@@ -51,7 +57,18 @@ enable_lightning_bolt = true
trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

# Controls the log verbosity. See also [here][0].
#
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
#log = "..."

address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.

[global.well_known]
# Conduit handles the /.well-known/matrix/* endpoints, making both clients and servers try to access conduit with the host
# server_name and port 443 by default.
# If you want to override these defaults, uncomment and edit the following lines accordingly:
#server = your.server.name:443
#client = https://your.server.name
|
||||
|
|
debian/README.md (vendored, 2 changes)

@@ -5,7 +5,7 @@ Installation

------------

For information about downloading, building and deploying the Debian package, see
the "Installing Conduit" section in [DEPLOY.md](../DEPLOY.md).
the "Installing Conduit" section in the Deploying docs.
All following sections until "Setting up the Reverse Proxy" can be ignored because
this is handled automatically by the packaging.
debian/postinst (vendored, 19 changes)

@@ -72,13 +72,30 @@ max_request_size = 20_000_000 # in bytes

# Enables registration. If set to false, no users can register on this server.
allow_registration = true

# A static registration token that new users will have to provide when creating
# an account.
# - Insert a password that users will have to enter on registration
# - Start the line with '#' to remove the condition
#registration_token = ""

allow_federation = true
allow_check_for_updates = true

# Enable the display name lightning bolt on registration.
enable_lightning_bolt = true

# Servers listed here will be used to gather public keys of other servers.
# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
# support batched key requests, so this list should only contain Synapse
# servers.)
trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

# Controls the log verbosity. See also [here][0].
#
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
#log = "..."
EOF
fi
;;
default.nix (new file, 10 lines)

@@ -0,0 +1,10 @@

(import
  (
    let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
    fetchTarball {
      url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
      sha256 = lock.nodes.flake-compat.locked.narHash;
    }
  )
  { src = ./.; }
).defaultNix
docs/SUMMARY.md (new file, 14 lines)

@@ -0,0 +1,14 @@

# Summary

- [Introduction](introduction.md)

- [Configuration](configuration.md)
- [Delegation](delegation.md)
- [Deploying](deploying.md)
  - [Generic](deploying/generic.md)
  - [Debian](deploying/debian.md)
  - [Docker](deploying/docker.md)
  - [NixOS](deploying/nixos.md)
- [TURN](turn.md)
- [Appservices](appservices.md)
- [FAQ](faq.md)
docs/configuration.md (new file, 114 lines)

@@ -0,0 +1,114 @@

# Configuration

**Conduit** is configured using a TOML file. The configuration file is loaded from the path specified by the `CONDUIT_CONFIG` environment variable.

> **Note:** The configuration file is required to run Conduit. If the `CONDUIT_CONFIG` environment variable is not set, Conduit will exit with an error.

> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect.

> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. Example: `CONDUIT_SERVER_NAME="example.org"`
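As a concrete illustration, here is a minimal sketch of starting Conduit configured entirely through environment variables. The field names follow the table below; `CONDUIT_WELL_KNOWN__CLIENT` is an assumption derived from the `{table_name}__{field_name}` rule above, and the binary path is only an example:

```bash
# An empty CONDUIT_CONFIG tells Conduit not to look for a config file.
export CONDUIT_CONFIG=""
export CONDUIT_SERVER_NAME="example.org"
export CONDUIT_DATABASE_BACKEND="rocksdb"
export CONDUIT_DATABASE_PATH="/var/lib/matrix-conduit/"
export CONDUIT_ADDRESS="127.0.0.1"
export CONDUIT_PORT="6167"
# Field inside the [global.well_known] table (name derived from the
# table__field rule above; adjust if your version differs).
export CONDUIT_WELL_KNOWN__CLIENT="https://matrix.example.org"

/usr/local/bin/matrix-conduit
```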
Conduit's configuration file is divided into the following sections:

- [Global](#global)
  - [TLS](#tls)
  - [Proxy](#proxy)


## Global

The `global` section contains the following fields:

> **Note:** The `*` symbol indicates that the field is required, and the values in **parentheses** are the possible values

| Field | Type | Description | Default |
| --- | --- | --- | --- |
| `address` | `string` | The address to bind to | `"127.0.0.1"` |
| `port` | `integer` | The port to bind to | `8000` |
| `tls` | `table` | See the [TLS configuration](#tls) | N/A |
| `server_name`_*_ | `string` | The server name | N/A |
| `database_backend`_*_ | `string` | The database backend to use (`"rocksdb"` *recommended*, `"sqlite"`) | N/A |
| `database_path`_*_ | `string` | The path to the database file/dir | N/A |
| `db_cache_capacity_mb` | `float` | The cache capacity, in MB | `300.0` |
| `enable_lightning_bolt` | `boolean` | Add `⚡️` emoji to end of user's display name | `true` |
| `allow_check_for_updates` | `boolean` | Allow Conduit to check for updates | `true` |
| `conduit_cache_capacity_modifier` | `float` | The value to multiply the default cache capacity by | `1.0` |
| `rocksdb_max_open_files` | `integer` | The maximum number of open files | `1000` |
| `pdu_cache_capacity` | `integer` | The maximum number of Persisted Data Units (PDUs) to cache | `150000` |
| `cleanup_second_interval` | `integer` | How often Conduit should clean up the database, in seconds | `60` |
| `max_request_size` | `integer` | The maximum request size, in bytes | `20971520` (20 MiB) |
| `max_concurrent_requests` | `integer` | The maximum number of concurrent requests | `100` |
| `max_fetch_prev_events` | `integer` | The maximum number of previous events to fetch per request if Conduit notices events are missing | `100` |
| `allow_registration` | `boolean` | Opens your homeserver to public registration | `false` |
| `registration_token` | `string` | The token users need to have when registering to your homeserver | N/A |
| `allow_encryption` | `boolean` | Allow users to enable encryption in their rooms | `true` |
| `allow_federation` | `boolean` | Allow federation with other servers | `true` |
| `allow_room_creation` | `boolean` | Allow users to create rooms | `true` |
| `allow_unstable_room_versions` | `boolean` | Allow users to create and join rooms with unstable versions | `true` |
| `default_room_version` | `string` | The default room version (`"6"`-`"10"`) | `"10"` |
| `allow_jaeger` | `boolean` | Allow Jaeger tracing | `false` |
| `tracing_flame` | `boolean` | Enable flame tracing | `false` |
| `proxy` | `table` | See the [Proxy configuration](#proxy) | N/A |
| `jwt_secret` | `string` | The secret used in the JWT to enable JWT login; without it, a 400 error will be returned | N/A |
| `trusted_servers` | `array` | The list of trusted servers to gather public keys of offline servers | `["matrix.org"]` |
| `log` | `string` | The log verbosity to use | `"warn"` |
| `turn_username` | `string` | The TURN username | `""` |
| `turn_password` | `string` | The TURN password | `""` |
| `turn_uris` | `array` | The TURN URIs | `[]` |
| `turn_secret` | `string` | The TURN secret | `""` |
| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` |
| `emergency_password` | `string` | Set a password to log in as the `conduit` user in case of emergency | N/A |
| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |


### TLS
The `tls` table contains the following fields:
- `certs`: The path to the public PEM certificate
- `key`: The path to the PEM private key

#### Example
```toml
[global.tls]
certs = "/path/to/cert.pem"
key = "/path/to/key.pem"
```


### Proxy
You can choose which requests Conduit should proxy (if any). The `proxy` table contains the following fields:

#### Global
The global option will proxy all outgoing requests. The `global` table contains the following fields:
- `url`: The URL of the proxy server
##### Example
```toml
[global.proxy.global]
url = "https://example.com"
```

#### By domain
An array of tables that contain the following fields:
- `url`: The URL of the proxy server
- `include`: Domains that should be proxied (assumed to be `["*"]` if unset)
- `exclude`: Domains that should not be proxied (takes precedence over `include`)

Both `include` and `exclude` allow for glob pattern matching.
##### Example
In this example, all requests to domains ending in `.onion` and to `matrix.secretly-an-onion-domain.xyz`
will be proxied via `socks5://localhost:9050`, except for domains ending in `.clearnet.onion`. You can add as many `by_domain` tables as you need.
```toml
[[global.proxy.by_domain]]
url = "socks5://localhost:9050"
include = ["*.onion", "matrix.secretly-an-onion-domain.xyz"]
exclude = ["*.clearnet.onion"]
```

### Example

> **Note:** The following example is a minimal configuration file. You should replace the values with your own.

```toml
[global]
{{#include ../conduit-example.toml:22:}}
```
docs/delegation.md (new file, 69 lines)

@@ -0,0 +1,69 @@

# Delegation

You can run Conduit on a separate domain from the actual server name (what shows up in user IDs, aliases, etc.).
For example, you can have your users have IDs such as `@foo:example.org` and have aliases like `#bar:example.org`,
while actually having Conduit hosted on the `matrix.example.org` domain. This is called delegation.

## Automatic (recommended)

Conduit has support for hosting the delegation files by itself, and by default uses this to serve federation traffic on port 443.

With this method, you need to direct requests to `/.well-known/matrix/*` to Conduit in your reverse proxy.

This is only recommended if Conduit is on the same physical server as the server which serves your server name (e.g. example.org),
as servers don't always seem to cache the response, leading to slower response times otherwise. It should also work if you
are connected to the server running Conduit using something like a VPN.

> **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration.

To configure it, use the following options:
| Field | Type | Description | Default |
| --- | --- | --- | --- |
| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |

### Example

```toml
[global]
well_known_client = "https://matrix.example.org"
well_known_server = "matrix.example.org:443"
```
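Once the reverse proxy forwards `/.well-known/matrix/*` to Conduit, you can sanity-check the result from outside. This is a minimal sketch; `example.org` and `matrix.example.org` are placeholders, and the expected payload shapes follow the formats described in the Manual section below:

```bash
# Server delegation file, answered by Conduit via the reverse proxy.
curl https://example.org/.well-known/matrix/server
# expected shape: {"m.server": "matrix.example.org:443"}

# Client delegation file.
curl https://example.org/.well-known/matrix/client
# expected shape: {"m.homeserver": {"base_url": "https://matrix.example.org"}}
```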
## Manual

Alternatively, you can serve static JSON files to inform clients and servers how to connect to Conduit.

### Servers

For servers to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/server`:

```json
{
  "m.server": "matrix.example.org:443"
}
```
Where `matrix.example.org` is the domain and `443` is the port Conduit is accessible at.

### Clients

For clients to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/client`:
```json
{
  "m.homeserver": {
    "base_url": "https://matrix.example.org"
  }
}
```
Where `matrix.example.org` is the URL Conduit is accessible at.

To ensure that all clients can access this endpoint, it is recommended you set the following headers for this endpoint:
```
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS
Access-Control-Allow-Headers: X-Requested-With, Content-Type, Authorization
```
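A quick way to confirm that your web server actually sends these headers (hypothetical domain, and assuming your server answers `HEAD` requests for the static file):

```bash
# Show only the CORS-related response headers for the client well-known file.
curl -sI https://example.org/.well-known/matrix/client | grep -i '^access-control'
```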
If you also want to be able to use [sliding sync][0], look [here](faq.md#how-do-i-setup-sliding-sync).

[0]: https://matrix.org/blog/2023/09/matrix-2-0/#sliding-sync
docs/deploying.md (new file, 3 lines)

@@ -0,0 +1,3 @@

# Deploying

This chapter describes various ways to deploy Conduit.
docs/deploying/debian.md (new file, 1 line)

@@ -0,0 +1 @@

{{#include ../../debian/README.md}}
@@ -7,8 +7,8 @@ services:

    ### then you are ready to go.
    image: matrixconduit/matrix-conduit:latest
    ### If you want to build a fresh image from the sources, then comment the image line and uncomment the
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
    # build:
    #   context: .
    #   args:

@@ -26,19 +26,19 @@ services:

      CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
      CONDUIT_DATABASE_BACKEND: rocksdb
      CONDUIT_PORT: 6167
      CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
      CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONDUIT_ALLOW_REGISTRATION: 'true'
      #CONDUIT_REGISTRATION_TOKEN: '' # require password for registration
      CONDUIT_ALLOW_FEDERATION: 'true'
      CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
      #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
      #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
      CONDUIT_ADDRESS: 0.0.0.0
      CONDUIT_CONFIG: '' # Ignore this

  # We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container
  # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
  # and in the docker-compose override file.
  # and in the docker compose override file.
  well-known:
    image: nginx:latest
    restart: unless-stopped

@@ -18,7 +18,7 @@ services:

  # We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container
  # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
  # and in the docker-compose file.
  # and in the docker compose file.
  well-known:
    labels:
      - "traefik.enable=true"

@@ -7,8 +7,8 @@ services:

    ### then you are ready to go.
    image: matrixconduit/matrix-conduit:latest
    ### If you want to build a fresh image from the sources, then comment the image line and uncomment the
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
    # build:
    #   context: .
    #   args:

@@ -31,20 +31,18 @@ services:

      ### Uncomment and change values as desired
      # CONDUIT_ADDRESS: 0.0.0.0
      # CONDUIT_PORT: 6167
      # CONDUIT_REGISTRATION_TOKEN: '' # require password for registration
      # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
      # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
      # CONDUIT_LOG: info # default is: "warn,_=off,sled=off"
      # CONDUIT_ALLOW_JAEGER: 'false'
      # CONDUIT_ALLOW_ENCRYPTION: 'true'
      # CONDUIT_ALLOW_FEDERATION: 'true'
      # CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
      # CONDUIT_WORKERS: 10
      # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
      # CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB

  # We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container
  # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
  # and in the docker-compose override file.
  # and in the docker compose override file.
  well-known:
    image: nginx:latest
    restart: unless-stopped

@@ -7,8 +7,8 @@ services:

    ### then you are ready to go.
    image: matrixconduit/matrix-conduit:latest
    ### If you want to build a fresh image from the sources, then comment the image line and uncomment the
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
    # build:
    #   context: .
    #   args:

@@ -26,13 +26,12 @@ services:

      CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
      CONDUIT_DATABASE_BACKEND: rocksdb
      CONDUIT_PORT: 6167
      CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
      CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONDUIT_ALLOW_REGISTRATION: 'true'
      CONDUIT_ALLOW_FEDERATION: 'true'
      CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
      #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
      #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
      CONDUIT_ADDRESS: 0.0.0.0
      CONDUIT_CONFIG: '' # Ignore this
      #
@@ -1,4 +1,4 @@

# Deploy using Docker
# Conduit for Docker

> **Note:** To run and use Conduit you should probably use it with a domain or subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Let's Encrypt certificate.

@@ -61,24 +61,24 @@ docker run -d -p 8448:6167 \

    -e CONDUIT_DATABASE_BACKEND="rocksdb" \
    -e CONDUIT_ALLOW_REGISTRATION=true \
    -e CONDUIT_ALLOW_FEDERATION=true \
    -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
    -e CONDUIT_MAX_REQUEST_SIZE="20000000" \
    -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
    -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
    -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \
    -e CONDUIT_PORT="6167" \
    --name conduit <link>
```

or you can use [docker-compose](#docker-compose).
or you can use [docker compose](#docker-compose).

The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file; an example can be found [here](../conduit-example.toml).
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file; an example can be found [here](../configuration.md).
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.

If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
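If you would rather skip the config file entirely, a minimal sketch of an env-var-only run could look like this. The image tag matches the one used in the compose files of this repository; the volume mount and the chosen values are placeholders you should adapt:

```bash
docker run -d -p 8448:6167 \
    -v db:/var/lib/matrix-conduit/ \
    -e CONDUIT_CONFIG="" \
    -e CONDUIT_SERVER_NAME="your.server.name" \
    -e CONDUIT_DATABASE_BACKEND="rocksdb" \
    -e CONDUIT_DATABASE_PATH="/var/lib/matrix-conduit/" \
    -e CONDUIT_ADDRESS="0.0.0.0" \
    -e CONDUIT_PORT="6167" \
    --name conduit matrixconduit/matrix-conduit:latest
```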
### Docker-compose
### Docker compose

If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.
If the `docker run` command is not for you or your setup, you can also use one of the provided `docker compose` files.

Depending on your proxy setup, you can use one of the following files:
- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)

@@ -88,15 +88,14 @@ Depending on your proxy setup, you can use one of the following files;

When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and
rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want
for your server.

Additional info about deploying Conduit can be found [here](../DEPLOY.md).
Additional info about deploying Conduit can be found [here](generic.md).

### Build

To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with:
To build the Conduit image with docker compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker compose with:

```bash
docker-compose up
docker compose up
```

This will also start the container right afterwards, so if you want it to run in detached mode, you should also use the `-d` flag.

@@ -106,7 +105,7 @@ This will also start the container right afterwards, so if want it to run in det

If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with:

```bash
docker-compose up -d
docker compose up -d
```

> **Note:** Don't forget to modify and adjust the compose file to your needs.

@@ -131,7 +130,7 @@ So...step by step:

1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
   [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename.
2. Open both files and modify/adjust them to your needs: change `CONDUIT_SERVER_NAME` and the volume host mappings accordingly.
3. Create the `conduit.toml` config file (an example can be found [here](../conduit-example.toml)), or set `CONDUIT_CONFIG=""` and configure Conduit via env vars.
3. Create the `conduit.toml` config file (an example can be found [here](../configuration.md)), or set `CONDUIT_CONFIG=""` and configure Conduit via env vars.
4. Uncomment the `element-web` service if you want to host your own Element Web client and create an `element_config.json`.
5. Create the files needed by the `well-known` service.

@@ -159,7 +158,7 @@ So...step by step:

   }
   ```

6. Run `docker-compose up -d`
6. Run `docker compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.


@@ -198,8 +197,8 @@ Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using

docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
```

or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml`
and run `docker-compose up -d` in the same directory.
or docker compose. For the latter, paste the following section into a file called `docker-compose.yml`
and run `docker compose up -d` in the same directory.

```yml
version: 3
@@ -1,4 +1,4 @@

# Deploying Conduit
# Generic deployment documentation

> ## Getting help
>

@@ -10,29 +10,32 @@

Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore
only offer Linux binaries.

You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate URL:
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. For `arm`, you should use `aarch`. Now copy the appropriate URL:

| CPU Architecture | Download stable version | Download development version |
| ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- |
| x86_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | [Binary][x84_64-glibc-next] / [.deb][x84_64-glibc-next-deb] |
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] |
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] |
**Stable/Main versions:**

| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) |
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-arm64v8.tar.gz?job=artifacts) |

These builds were created on and linked against the glibc version shipped with Debian bullseye.
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.

[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
[x84_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next
[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next
[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
[x84_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next
[armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next
[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next
**Latest/Next versions:**

| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=artifacts) |

```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url>

@@ -53,26 +56,6 @@ Then, `cd` into the source tree of conduit-next and run:

$ cargo build --release
```

If you want to cross compile Conduit to another architecture, read the guide below.

<details>
<summary>Cross compilation</summary>

The easiest way to compile Conduit for another platform is [cross-rs](https://github.com/cross-rs/cross), so install it first.

In order to use RocksDB as the storage backend, append `-latomic` to the linker flags.

For example, to build a binary for Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as the compilation
target.

```bash
git clone https://gitlab.com/famedly/conduit.git
cd conduit
export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc'
cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf
```
</details>

## Adding a Conduit user

While Conduit can run as any user, it is usually better to use dedicated users for different services. This also allows

@@ -133,57 +116,12 @@ $ sudo systemctl daemon-reload

## Creating the Conduit configuration file

Now we need to create Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
to read it. You need to change at least the server name.**
Now we need to create Conduit's config file in
`/etc/matrix-conduit/conduit.toml`. Paste in the contents of
[`conduit-example.toml`](../configuration.md) **and take a moment to read it.
You need to change at least the server name.**
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.

```toml
[global]
# The server_name is the pretty name of this server. It is used as a suffix for user
# and room ids. Examples: matrix.org, conduit.rs

# The Conduit server needs all /_matrix/ requests to be reachable at
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).

# If that's not possible for you, you can create /.well-known files to redirect
# requests. See
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
# and
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
# for more information

# YOU NEED TO EDIT THIS
#server_name = "your.server.name"

# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/"
database_backend = "rocksdb"

# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
# 443 and 8448 will be forwarded to the Conduit instance running on this port
# Docker users: Don't change this, you'll need to map an external port to this.
port = 6167

# Max size for uploads
max_request_size = 20_000_000 # in bytes

# Enables registration. If set to false, no users can register on this server.
allow_registration = true

allow_federation = true
allow_check_for_updates = true

# Server to get public keys from. You probably shouldn't change this
trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (i.e. Traefik) can reach it.
```
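One minimal way to create that file before pasting in the configuration above (the directory path is the one used in this guide; `sudoedit` is just one editor choice):

```bash
# Create the config directory and open the file for editing as root.
sudo mkdir -p /etc/matrix-conduit
sudoedit /etc/matrix-conduit/conduit.toml
# Paste the example configuration above, set server_name, and save.
```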
## Setting the correct file permissions

As we are using a Conduit-specific user, we need to allow it to read the config. To do that you can run this command on

@@ -273,7 +211,7 @@ server {

    client_max_body_size 20M;

    location /_matrix/ {
        proxy_pass http://127.0.0.1:6167$request_uri;
        proxy_pass http://127.0.0.1:6167;
        proxy_set_header Host $http_host;
        proxy_buffering off;
        proxy_read_timeout 5m;

@@ -326,7 +264,7 @@ $ sudo systemctl enable conduit

## How do I know it works?

You can open <https://app.element.io>, enter your homeserver and try to register.
You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your homeserver and try to register. If you are using a registration token, use [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/), as they support this feature.

You can also use these commands as a quick health check.

@@ -344,8 +282,8 @@ $ curl https://your.server.name:8448/_matrix/client/versions

## Audio/Video calls

For Audio/Video call functionality see the [TURN Guide](TURN.md).
For Audio/Video call functionality see the [TURN Guide](../turn.md).

## Appservices

If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md).
If you want to set up an appservice, take a look at the [Appservice Guide](../appservices.md).
docs/deploying/nixos.md (new file, 18 lines)

@@ -0,0 +1,18 @@

# Conduit for NixOS

Conduit can be acquired by Nix from various places:

* The `flake.nix` at the root of the repo
* The `default.nix` at the root of the repo
* From Nixpkgs

The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so
(for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to
configure Conduit.

If you want to run the latest code, you should get Conduit from the `flake.nix`
or `default.nix` and set [`services.matrix-conduit.package`][package]
appropriately.
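As a sketch of that route, assuming a checkout of this repository and that your Nix installation has flakes enabled (the `x86_64-linux` attribute below is an assumption; substitute your own system):

```bash
# Build the default package straight from the flake.
nix build .#default

# Or, without flakes, go through the flake-compat shim in default.nix.
nix-build -A packages.x86_64-linux.default

# Point services.matrix-conduit.package at the package you built
# in your NixOS configuration.
```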
[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
docs/faq.md (new file, 41 lines)

@@ -0,0 +1,41 @@

# FAQ

Here are some of the most frequently asked questions about Conduit, and their answers.

## Why do I get an `M_INCOMPATIBLE_ROOM_VERSION` error when trying to join some rooms?

Conduit doesn't support room versions 1 and 2 at all, and doesn't properly support versions 3-5 currently. You can track the progress of adding support [here](https://gitlab.com/famedly/conduit/-/issues/433).

## How do I back up my server?

Backing up your Conduit server is easy.
Simply stop Conduit, make a copy or file system snapshot of the database directory, then start Conduit again.

> **Note**: When using a file system snapshot, it is not required that you stop the server, but it is still recommended as it is the safest option and should ensure your database is not left in an inconsistent state.
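For the stop-copy-start approach, a minimal sketch (the service name and database path match the generic deployment guide; adjust both to your setup):

```bash
# Stop Conduit so the database is not being written to while copying.
sudo systemctl stop conduit

# Archive the database directory somewhere safe.
sudo tar -czf conduit-backup-$(date +%F).tar.gz /var/lib/matrix-conduit/

# Start Conduit again.
sudo systemctl start conduit
```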
## How do I setup sliding sync?

If you use the [automatic method for delegation](delegation.md#automatic-recommended) or just proxy `.well-known/matrix/client` to Conduit, sliding sync should work with no extra configuration.
If you don't, continue below.

You need to add an `org.matrix.msc3575.proxy` field to your `.well-known/matrix/client` response, containing a URL at which Conduit is accessible.
Here is an example:
```json
{
  "m.homeserver": {
    "base_url": "https://matrix.example.org"
  },
  "org.matrix.msc3575.proxy": {
    "url": "https://matrix.example.org"
  }
}
```
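You can verify what clients will see by fetching the file yourself (hypothetical domain):

```bash
# Expect the org.matrix.msc3575.proxy entry pointing at the URL Conduit is reachable on.
curl -s https://example.org/.well-known/matrix/client | grep msc3575
```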
## Can I migrate from Synapse to Conduit?

Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically.
Rooms that were federated can be re-joined via the other participating servers; however, media and the like may be deleted from remote servers after some time, and hence might not be recoverable.

## How do I make someone an admin?

Simply invite them to the admin room. Once joined, they can administer the server by interacting with the `@conduit:<server_name>` user.
docs/introduction.md (new file, 13 lines)

@@ -0,0 +1,13 @@

# Conduit

{{#include ../README.md:catchphrase}}

{{#include ../README.md:body}}

#### How can I deploy my own?

- [Deployment options](deploying.md)

If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md).

{{#include ../README.md:footer}}
@@ -1,8 +1,8 @@

# Setting up TURN/STURN
# Setting up TURN/STUN

## General instructions

* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).
* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/element-hq/synapse/blob/develop/docs/turn-howto.md).

## Edit/Add a few settings to your existing conduit.toml
engage.toml (new file, 79 lines)

@@ -0,0 +1,79 @@

interpreter = ["bash", "-euo", "pipefail", "-c"]

[[task]]
group = "versions"
name = "engage"
script = "engage --version"

[[task]]
group = "versions"
name = "rustc"
script = "rustc --version"

[[task]]
group = "versions"
name = "cargo"
script = "cargo --version"

[[task]]
group = "versions"
name = "cargo-fmt"
script = "cargo fmt --version"

[[task]]
group = "versions"
name = "rustdoc"
script = "rustdoc --version"

[[task]]
group = "versions"
name = "cargo-clippy"
script = "cargo clippy -- --version"

[[task]]
group = "versions"
name = "lychee"
script = "lychee --version"

[[task]]
group = "lints"
name = "cargo-fmt"
script = "cargo fmt --check -- --color=always"

[[task]]
group = "lints"
name = "cargo-doc"
script = """
RUSTDOCFLAGS="-D warnings" cargo doc \
--workspace \
--no-deps \
--document-private-items \
--color always
"""

[[task]]
group = "lints"
name = "cargo-clippy"
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"

[[task]]
group = "lints"
name = "taplo-fmt"
script = "taplo fmt --check --colors always"

[[task]]
group = "lints"
name = "lychee"
script = "lychee --offline docs"

[[task]]
group = "tests"
name = "cargo"
script = """
cargo test \
--workspace \
--all-targets \
--color=always \
-- \
--color=always
"""
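This file is consumed by the `engage` task runner used in CI. Assuming `engage` keeps its usual behaviour of running every defined task when invoked without arguments (an assumption; check `engage --help` if unsure), the whole suite can be reproduced locally:

```bash
# Run all of the version, lint and test tasks defined in engage.toml.
engage
```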
206
flake.lock
generated
206
flake.lock
generated
|
@ -1,22 +1,41 @@
|
|||
{
|
||||
"nodes": {
|
||||
"crane": {
|
||||
"attic": {
|
||||
"inputs": {
|
||||
"crane": "crane",
|
||||
"flake-compat": "flake-compat",
|
||||
"flake-utils": [
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-overlay": "rust-overlay"
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"nixpkgs-stable": "nixpkgs-stable"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1688772518,
|
||||
"narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=",
|
||||
"lastModified": 1707922053,
|
||||
"narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=",
|
||||
"owner": "zhaofengli",
|
||||
"repo": "attic",
|
||||
"rev": "6eabc3f02fae3683bffab483e614bebfcd476b21",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "zhaofengli",
|
||||
"ref": "main",
|
||||
"repo": "attic",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"attic",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1702918879,
|
||||
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e",
|
||||
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -25,6 +44,27 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane_2": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1713721181,
|
||||
"narHash": "sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "55f4939ac59ff8f89c6a4029730a2d49ea09105f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"ref": "master",
|
||||
"repo": "crane",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fenix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
|
@ -33,11 +73,11 @@
|
|||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1689488573,
|
||||
"narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=",
|
||||
"lastModified": 1709619709,
|
||||
"narHash": "sha256-l6EPVJfwfelWST7qWQeP6t/TDK3HHv5uUB1b2vw4mOQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "39096fe3f379036ff4a5fa198950b8e79defe939",
|
||||
"rev": "c8943ea9e98d41325ff57d4ec14736d330b321b2",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -62,16 +102,29 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"flake-compat_2": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1689068808,
|
||||
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
|
||||
"lastModified": 1696426674,
|
||||
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"locked": {
|
||||
"lastModified": 1667395993,
|
||||
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
|
||||
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -80,13 +133,78 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709126324,
|
||||
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-filter": {
|
||||
"locked": {
|
||||
"lastModified": 1705332318,
|
||||
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1689444953,
|
||||
"narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=",
|
||||
"lastModified": 1702539185,
|
||||
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "8acef304efe70152463a6399f73e636bcc363813",
|
||||
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-stable": {
|
||||
"locked": {
|
||||
"lastModified": 1702780907,
|
||||
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-23.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1709479366,
|
||||
"narHash": "sha256-n6F0n8UV6lnTZbYPl1A9q1BS0p4hduAv1mGAP17CVd0=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "b8697e57f10292a6165a20f03d2f42920dfaf973",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -98,20 +216,23 @@
|
|||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"crane": "crane",
|
||||
"attic": "attic",
|
||||
"crane": "crane_2",
|
||||
"fenix": "fenix",
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs"
|
||||
"flake-compat": "flake-compat_2",
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nix-filter": "nix-filter",
|
||||
"nixpkgs": "nixpkgs_2"
|
||||
}
|
||||
},
|
||||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1689441253,
|
||||
"narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=",
|
||||
"lastModified": 1709571018,
|
||||
"narHash": "sha256-ISFrxHxE0J5g7lDAscbK88hwaT5uewvWoma9TlFmRzM=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8",
|
||||
"rev": "9f14343f9ee24f53f17492c5f9b653427e2ad15e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -121,31 +242,6 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"flake-utils": [
|
||||
"crane",
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"crane",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1688351637,
|
||||
"narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "f9b92316727af9e6c7fee4a761242f7f46880329",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
|
|
160
flake.nix
160
flake.nix
|
@ -2,92 +2,114 @@
|
|||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
nix-filter.url = "github:numtide/nix-filter";
|
||||
flake-compat = {
|
||||
url = "github:edolstra/flake-compat";
|
||||
flake = false;
|
||||
};
|
||||
|
||||
fenix = {
|
||||
url = "github:nix-community/fenix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
crane = {
|
||||
url = "github:ipetkov/crane";
|
||||
url = "github:ipetkov/crane?ref=master";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
inputs.flake-utils.follows = "flake-utils";
|
||||
};
|
||||
attic.url = "github:zhaofengli/attic?ref=main";
|
||||
};
|
||||
|
||||
outputs =
|
||||
{ self
|
||||
, nixpkgs
|
||||
, flake-utils
|
||||
|
||||
, fenix
|
||||
, crane
|
||||
}: flake-utils.lib.eachDefaultSystem (system:
|
||||
outputs = inputs:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
# Keep sorted
|
||||
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
|
||||
craneLib =
|
||||
(inputs.crane.mkLib pkgs).overrideToolchain self.toolchain;
|
||||
|
||||
# Use mold on Linux
|
||||
stdenv = if pkgs.stdenv.isLinux then
|
||||
pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv
|
||||
else
|
||||
pkgs.stdenv;
|
||||
default = self.callPackage ./nix/pkgs/default {};
|
||||
|
||||
# Nix-accessible `Cargo.toml`
|
||||
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
|
||||
inherit inputs;
|
||||
|
||||
# The Rust toolchain to use
|
||||
toolchain = fenix.packages.${system}.toolchainOf {
|
||||
# Use the Rust version defined in `Cargo.toml`
|
||||
channel = cargoToml.package.rust-version;
|
||||
oci-image = self.callPackage ./nix/pkgs/oci-image {};
|
||||
|
||||
# THE rust-version HASH
|
||||
sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc=";
|
||||
};
|
||||
book = self.callPackage ./nix/pkgs/book {};
|
||||
|
||||
# The system's RocksDB
|
||||
ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include";
|
||||
ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
|
||||
|
||||
# Shared between the package and the devShell
|
||||
nativeBuildInputs = (with pkgs.rustPlatform; [
|
||||
bindgenHook
|
||||
]);
|
||||
|
||||
builder =
|
||||
((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage;
|
||||
rocksdb =
|
||||
let
|
||||
version = "9.1.1";
|
||||
in
|
||||
{
|
||||
packages.default = builder {
|
||||
src = ./.;
|
||||
|
||||
inherit
|
||||
stdenv
|
||||
nativeBuildInputs
|
||||
ROCKSDB_INCLUDE_DIR
|
||||
ROCKSDB_LIB_DIR;
|
||||
};
|
||||
|
||||
devShells.default = (pkgs.mkShell.override { inherit stdenv; }) {
|
||||
# Rust Analyzer needs to be able to find the path to default crate
|
||||
# sources, and it can read this environment variable to do so
|
||||
RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library";
|
||||
|
||||
inherit
|
||||
ROCKSDB_INCLUDE_DIR
|
||||
ROCKSDB_LIB_DIR;
|
||||
|
||||
# Development tools
|
||||
nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [
|
||||
cargo
|
||||
clippy
|
||||
rust-src
|
||||
rustc
|
||||
rustfmt
|
||||
]);
|
||||
};
|
||||
|
||||
checks = {
|
||||
packagesDefault = self.packages.${system}.default;
|
||||
devShellsDefault = self.devShells.${system}.default;
|
||||
pkgs.rocksdb.overrideAttrs (old: {
|
||||
inherit version;
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "facebook";
|
||||
repo = "rocksdb";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-/Xf0bzNJPclH9IP80QNaABfhj4IAR5LycYET18VFCXc=";
|
||||
};
|
||||
});
|
||||
|
||||
shell = self.callPackage ./nix/shell.nix {};
|
||||
|
||||
# The Rust toolchain to use
|
||||
toolchain = inputs
|
||||
.fenix
|
||||
.packages
|
||||
.${pkgs.pkgsBuildHost.system}
|
||||
.fromToolchainFile {
|
||||
file = ./rust-toolchain.toml;
|
||||
|
||||
# See also `rust-toolchain.toml`
|
||||
sha256 = "sha256-Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU=";
|
||||
};
|
||||
});
|
||||
in
|
||||
inputs.flake-utils.lib.eachDefaultSystem (system:
|
||||
let
|
||||
pkgs = inputs.nixpkgs.legacyPackages.${system};
|
||||
in
|
||||
{
|
||||
packages = {
|
||||
default = (mkScope pkgs).default;
|
||||
oci-image = (mkScope pkgs).oci-image;
|
||||
book = (mkScope pkgs).book;
|
||||
}
|
||||
//
|
||||
builtins.listToAttrs
|
||||
(builtins.concatLists
|
||||
(builtins.map
|
||||
(crossSystem:
|
||||
let
|
||||
binaryName = "static-${crossSystem}";
|
||||
pkgsCrossStatic =
|
||||
(import inputs.nixpkgs {
|
||||
inherit system;
|
||||
crossSystem = {
|
||||
config = crossSystem;
|
||||
};
|
||||
}).pkgsStatic;
|
||||
in
|
||||
[
|
||||
# An output for a statically-linked binary
|
||||
{
|
||||
name = binaryName;
|
||||
value = (mkScope pkgsCrossStatic).default;
|
||||
}
|
||||
|
||||
# An output for an OCI image based on that binary
|
||||
{
|
||||
name = "oci-image-${crossSystem}";
|
||||
value = (mkScope pkgsCrossStatic).oci-image;
|
||||
}
|
||||
]
|
||||
)
|
||||
[
|
||||
"x86_64-unknown-linux-musl"
|
||||
"aarch64-unknown-linux-musl"
|
||||
]
|
||||
)
|
||||
);
|
||||
|
||||
devShells.default = (mkScope pkgs).shell;
|
||||
}
|
||||
);
|
||||
}
|
||||
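For reference, with the two musl cross targets listed above, the flake ends up exposing per-system package attributes along these lines (an illustrative sketch for `x86_64-linux` only; the values are derivations, shown here as `null` placeholders):

```nix
{
  packages.x86_64-linux = {
    default = null;   # Conduit built for the native system
    oci-image = null; # OCI image wrapping the native build
    book = null;      # rendered documentation
    "static-x86_64-unknown-linux-musl" = null;
    "oci-image-x86_64-unknown-linux-musl" = null;
    "static-aarch64-unknown-linux-musl" = null;
    "oci-image-aarch64-unknown-linux-musl" = null;
  };
}
```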
|
|
nix/README.md (198 lines)
|
@ -1,198 +0,0 @@
|
|||
# Conduit for Nix/NixOS
|
||||
|
||||
This guide assumes you have a recent version of Nix (^2.4) installed.
|
||||
|
||||
Since Conduit ships as a Nix flake, you'll first need to [enable
|
||||
flakes][enable_flakes].
|
||||
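One way to do this on NixOS is through your system configuration (a minimal sketch; see the linked wiki page for other installation types):

```nix
{
  # Enable the `nix-command` and `flakes` experimental features system-wide
  nix.settings.experimental-features = [ "nix-command" "flakes" ];
}
```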
|
||||
You can now use the usual Nix commands to interact with Conduit's flake. For
|
||||
example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need
|
||||
to provide configuration and such manually as usual).
|
||||
|
||||
If your NixOS configuration is defined as a flake, you can depend on this flake
|
||||
to provide a more up-to-date version than provided by `nixpkgs`. In your flake,
|
||||
add the following to your `inputs`:
|
||||
|
||||
```nix
|
||||
conduit = {
|
||||
url = "gitlab:famedly/conduit";
|
||||
|
||||
# Assuming you have an input for nixpkgs called `nixpkgs`. If you experience
|
||||
# build failures while using this, try commenting/deleting this line. This
|
||||
# will probably also require you to always build from source.
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
```
|
||||
|
||||
Next, make sure you're passing your flake inputs to the `specialArgs` argument
|
||||
of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will
|
||||
assume you've named the group `flake-inputs`.
|
||||
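As a minimal sketch (assuming a flake input named `nixpkgs` and a host called `myhost`; both names are placeholders), this looks roughly like:

```nix
{
  outputs = inputs@{ nixpkgs, ... }: {
    nixosConfigurations.myhost = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      # Make every flake input available to your modules as `flake-inputs`
      specialArgs = { flake-inputs = inputs; };
      modules = [ ./configuration.nix ];
    };
  };
}
```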
|
||||
Now you can configure Conduit and a reverse proxy for it. Add the following to
|
||||
a new Nix file and include it in your configuration:
|
||||
|
||||
```nix
|
||||
{ config
|
||||
, pkgs
|
||||
, flake-inputs
|
||||
, ...
|
||||
}:
|
||||
|
||||
let
|
||||
# You'll need to edit these values
|
||||
|
||||
# The hostname that will appear in your user and room IDs
|
||||
server_name = "example.com";
|
||||
|
||||
# The hostname that Conduit actually runs on
|
||||
#
|
||||
# This can be the same as `server_name` if you want. This is only necessary
|
||||
# when Conduit is running on a different machine than the one hosting your
|
||||
# root domain. This configuration also assumes this is all running on a single
|
||||
# machine; some tweaks will be needed if this is not the case.
|
||||
matrix_hostname = "matrix.${server_name}";
|
||||
|
||||
# An admin email for TLS certificate notifications
|
||||
admin_email = "admin@${server_name}";
|
||||
|
||||
# These ones you can leave alone
|
||||
|
||||
# Build a derivation that stores the content of `${server_name}/.well-known/matrix/server`
|
||||
well_known_server = pkgs.writeText "well-known-matrix-server" ''
|
||||
{
|
||||
"m.server": "${matrix_hostname}"
|
||||
}
|
||||
'';
|
||||
|
||||
# Build a derivation that stores the content of `${server_name}/.well-known/matrix/client`
|
||||
well_known_client = pkgs.writeText "well-known-matrix-client" ''
|
||||
{
|
||||
"m.homeserver": {
|
||||
"base_url": "https://${matrix_hostname}"
|
||||
}
|
||||
}
|
||||
'';
|
||||
in
|
||||
|
||||
{
|
||||
# Configure Conduit itself
|
||||
services.matrix-conduit = {
|
||||
enable = true;
|
||||
|
||||
# This causes NixOS to use the flake defined in this repository instead of
|
||||
# the build of Conduit built into nixpkgs.
|
||||
package = flake-inputs.conduit.packages.${pkgs.system}.default;
|
||||
|
||||
settings.global = {
|
||||
inherit server_name;
|
||||
};
|
||||
};
|
||||
|
||||
# Configure automated TLS acquisition/renewal
|
||||
security.acme = {
|
||||
acceptTerms = true;
|
||||
defaults = {
|
||||
email = admin_email;
|
||||
};
|
||||
};
|
||||
|
||||
# ACME data must be readable by the NGINX user
|
||||
users.users.nginx.extraGroups = [
|
||||
"acme"
|
||||
];
|
||||
|
||||
# Configure NGINX as a reverse proxy
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
recommendedProxySettings = true;
|
||||
|
||||
virtualHosts = {
|
||||
"${matrix_hostname}" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
|
||||
listen = [
|
||||
{
|
||||
addr = "0.0.0.0";
|
||||
port = 443;
|
||||
ssl = true;
|
||||
}
|
||||
{
|
||||
addr = "[::]";
|
||||
port = 443;
|
||||
ssl = true;
|
||||
} {
|
||||
addr = "0.0.0.0";
|
||||
port = 8448;
|
||||
ssl = true;
|
||||
}
|
||||
{
|
||||
addr = "[::]";
|
||||
port = 8448;
|
||||
ssl = true;
|
||||
}
|
||||
];
|
||||
|
||||
locations."/_matrix/" = {
|
||||
proxyPass = "http://backend_conduit$request_uri";
|
||||
proxyWebsockets = true;
|
||||
extraConfig = ''
|
||||
proxy_set_header Host $host;
|
||||
proxy_buffering off;
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = ''
|
||||
merge_slashes off;
|
||||
'';
|
||||
};
|
||||
|
||||
"${server_name}" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
|
||||
locations."=/.well-known/matrix/server" = {
|
||||
# Use the contents of the derivation built previously
|
||||
alias = "${well_known_server}";
|
||||
|
||||
extraConfig = ''
|
||||
# Set the header since by default NGINX thinks it's just bytes
|
||||
default_type application/json;
|
||||
'';
|
||||
};
|
||||
|
||||
locations."=/.well-known/matrix/client" = {
|
||||
# Use the contents of the derivation built previously
|
||||
alias = "${well_known_client}";
|
||||
|
||||
extraConfig = ''
|
||||
# Set the header since by default NGINX thinks it's just bytes
|
||||
default_type application/json;
|
||||
|
||||
# https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients
|
||||
add_header Access-Control-Allow-Origin "*";
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
upstreams = {
|
||||
"backend_conduit" = {
|
||||
servers = {
|
||||
"[::1]:${toString config.services.matrix-conduit.settings.global.port}" = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Open firewall ports for HTTP, HTTPS, and Matrix federation
|
||||
networking.firewall.allowedTCPPorts = [ 80 443 8448 ];
|
||||
networking.firewall.allowedUDPPorts = [ 80 443 8448 ];
|
||||
}
|
||||
```
|
||||
|
||||
Now you can rebuild your system configuration and you should be good to go!
|
||||
|
||||
[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes
|
||||
|
||||
[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS
|
nix/pkgs/book/default.nix (new file, 34 lines)
|
@ -0,0 +1,34 @@
|
|||
# Keep sorted
|
||||
{ default
|
||||
, inputs
|
||||
, mdbook
|
||||
, stdenv
|
||||
}:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = "${default.pname}-book";
|
||||
version = default.version;
|
||||
|
||||
|
||||
src = let filter = inputs.nix-filter.lib; in filter {
|
||||
root = inputs.self;
|
||||
|
||||
# Keep sorted
|
||||
include = [
|
||||
"book.toml"
|
||||
"conduit-example.toml"
|
||||
"debian/README.md"
|
||||
"docs"
|
||||
"README.md"
|
||||
];
|
||||
};
|
||||
|
||||
nativeBuildInputs = [
|
||||
mdbook
|
||||
];
|
||||
|
||||
buildPhase = ''
|
||||
mdbook build
|
||||
mv public $out
|
||||
'';
|
||||
}
|
nix/pkgs/default/cross-compilation-env.nix (new file, 100 lines)
|
@ -0,0 +1,100 @@
|
|||
{ lib
|
||||
, pkgsBuildHost
|
||||
, rust
|
||||
, stdenv
|
||||
}:
|
||||
|
||||
lib.optionalAttrs stdenv.hostPlatform.isStatic {
|
||||
ROCKSDB_STATIC = "";
|
||||
}
|
||||
//
|
||||
{
|
||||
CARGO_BUILD_RUSTFLAGS =
|
||||
lib.concatStringsSep
|
||||
" "
|
||||
([]
|
||||
# This disables PIE for static builds, which isn't great in terms of
|
||||
# security. Unfortunately, my hand is forced because nixpkgs'
|
||||
# `libstdc++.a` is built without `-fPIE`, which precludes us from
|
||||
# leaving PIE enabled.
|
||||
++ lib.optionals
|
||||
stdenv.hostPlatform.isStatic
|
||||
[ "-C" "relocation-model=static" ]
|
||||
++ lib.optionals
|
||||
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
|
||||
[ "-l" "c" ]
|
||||
++ lib.optionals
|
||||
# This check has to match the one [here][0]. We only need to set
|
||||
# these flags when using a different linker. Don't ask me why, though,
|
||||
# because I don't know. All I know is it breaks otherwise.
|
||||
#
|
||||
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
|
||||
(
|
||||
# Nixpkgs doesn't check for x86_64 here but we do, because I
|
||||
# observed a failure building statically for x86_64 without
|
||||
# including it here. Linkers are weird.
|
||||
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
|
||||
&& stdenv.hostPlatform.isStatic
|
||||
&& !stdenv.isDarwin
|
||||
&& !stdenv.cc.bintools.isLLVM
|
||||
)
|
||||
[
|
||||
"-l"
|
||||
"stdc++"
|
||||
"-L"
|
||||
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
# What follows is stolen from [here][0]. Its purpose is to properly configure
|
||||
# compilers and linkers for various stages of the build, and even covers the
|
||||
# case of build scripts that need native code compiled and run on the build
|
||||
# platform (I think).
|
||||
#
|
||||
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
|
||||
//
|
||||
(
|
||||
let
|
||||
inherit (rust.lib) envVars;
|
||||
in
|
||||
lib.optionalAttrs
|
||||
(stdenv.targetPlatform.rust.rustcTarget
|
||||
!= stdenv.hostPlatform.rust.rustcTarget)
|
||||
(
|
||||
let
|
||||
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
|
||||
in
|
||||
{
|
||||
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
|
||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
|
||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
|
||||
envVars.linkerForTarget;
|
||||
}
|
||||
)
|
||||
//
|
||||
(
|
||||
let
|
||||
inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
|
||||
in
|
||||
{
|
||||
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
|
||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
|
||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
|
||||
CARGO_BUILD_TARGET = rustcTarget;
|
||||
}
|
||||
)
|
||||
//
|
||||
(
|
||||
let
|
||||
inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
|
||||
in
|
||||
{
|
||||
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
|
||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
|
||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
|
||||
HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
|
||||
HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
|
||||
}
|
||||
)
|
||||
)
|
nix/pkgs/default/default.nix (new file, 95 lines)
|
@ -0,0 +1,95 @@
|
|||
# Dependencies (keep sorted)
|
||||
{ craneLib
|
||||
, inputs
|
||||
, lib
|
||||
, pkgsBuildHost
|
||||
, rocksdb
|
||||
, rust
|
||||
, stdenv
|
||||
|
||||
# Options (keep sorted)
|
||||
, default-features ? true
|
||||
, features ? []
|
||||
, profile ? "release"
|
||||
}:
|
||||
|
||||
let
|
||||
buildDepsOnlyEnv =
|
||||
let
|
||||
rocksdb' = rocksdb.override {
|
||||
enableJemalloc = builtins.elem "jemalloc" features;
|
||||
};
|
||||
in
|
||||
{
|
||||
NIX_OUTPATH_USED_AS_RANDOM_SEED = "randomseed"; # https://crane.dev/faq/rebuilds-bindgen.html
|
||||
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
|
||||
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
|
||||
}
|
||||
//
|
||||
(import ./cross-compilation-env.nix {
|
||||
# Keep sorted
|
||||
inherit
|
||||
lib
|
||||
pkgsBuildHost
|
||||
rust
|
||||
stdenv;
|
||||
});
|
||||
|
||||
buildPackageEnv = {
|
||||
CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev;
|
||||
} // buildDepsOnlyEnv;
|
||||
|
||||
commonAttrs = {
|
||||
inherit
|
||||
(craneLib.crateNameFromCargoToml {
|
||||
cargoToml = "${inputs.self}/Cargo.toml";
|
||||
})
|
||||
pname
|
||||
version;
|
||||
|
||||
src = let filter = inputs.nix-filter.lib; in filter {
|
||||
root = inputs.self;
|
||||
|
||||
# Keep sorted
|
||||
include = [
|
||||
"Cargo.lock"
|
||||
"Cargo.toml"
|
||||
"src"
|
||||
];
|
||||
};
|
||||
|
||||
nativeBuildInputs = [
|
||||
# bindgen needs the build platform's libclang. Apparently due to "splicing
|
||||
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
|
||||
# right thing here.
|
||||
pkgsBuildHost.rustPlatform.bindgenHook
|
||||
];
|
||||
|
||||
CARGO_PROFILE = profile;
|
||||
};
|
||||
in
|
||||
|
||||
craneLib.buildPackage ( commonAttrs // {
|
||||
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
|
||||
env = buildDepsOnlyEnv;
|
||||
});
|
||||
|
||||
cargoExtraArgs = "--locked "
|
||||
+ lib.optionalString
|
||||
(!default-features)
|
||||
"--no-default-features "
|
||||
+ lib.optionalString
|
||||
(features != [])
|
||||
"--features " + (builtins.concatStringsSep "," features);
|
||||
|
||||
# This is redundant with CI
|
||||
doCheck = false;
|
||||
|
||||
env = buildPackageEnv;
|
||||
|
||||
passthru = {
|
||||
env = buildPackageEnv;
|
||||
};
|
||||
|
||||
meta.mainProgram = commonAttrs.pname;
|
||||
})
|
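The options declared at the top of this file (`default-features`, `features`, `profile`) can be overridden when the package is instantiated from the scope in `flake.nix`. A hypothetical invocation (not part of the repository) that builds a jemalloc-enabled dev-profile binary might look like:

```nix
# Hypothetical override of the packaging options above
(mkScope pkgs).default.override {
  default-features = true;
  features = [ "jemalloc" ];
  profile = "dev";
}
```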
nix/pkgs/oci-image/default.nix (new file, 25 lines)
|
@ -0,0 +1,25 @@
|
|||
# Keep sorted
|
||||
{ default
|
||||
, dockerTools
|
||||
, lib
|
||||
, tini
|
||||
}:
|
||||
|
||||
dockerTools.buildImage {
|
||||
name = default.pname;
|
||||
tag = "next";
|
||||
copyToRoot = [
|
||||
dockerTools.caCertificates
|
||||
];
|
||||
config = {
|
||||
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
|
||||
# are handled as expected
|
||||
Entrypoint = [
|
||||
"${lib.getExe' tini "tini"}"
|
||||
"--"
|
||||
];
|
||||
Cmd = [
|
||||
"${lib.getExe default}"
|
||||
];
|
||||
};
|
||||
}
|
nix/shell.nix (new file, 61 lines)
|
@ -0,0 +1,61 @@
|
|||
# Keep sorted
|
||||
{ cargo-deb
|
||||
, default
|
||||
, engage
|
||||
, go
|
||||
, inputs
|
||||
, jq
|
||||
, lychee
|
||||
, mdbook
|
||||
, mkShell
|
||||
, olm
|
||||
, system
|
||||
, taplo
|
||||
, toolchain
|
||||
}:
|
||||
|
||||
mkShell {
|
||||
env = default.env // {
|
||||
# Rust Analyzer needs to be able to find the path to default crate
|
||||
# sources, and it can read this environment variable to do so. The
|
||||
# `rust-src` component is required in order for this to work.
|
||||
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
|
||||
};
|
||||
|
||||
# Development tools
|
||||
nativeBuildInputs = [
|
||||
# Always use nightly rustfmt because most of its options are unstable
|
||||
#
|
||||
# This needs to come before `toolchain` in this list, otherwise
|
||||
# `$PATH` will have stable rustfmt instead.
|
||||
inputs.fenix.packages.${system}.latest.rustfmt
|
||||
|
||||
# rust itself
|
||||
toolchain
|
||||
|
||||
# CI tests
|
||||
engage
|
||||
|
||||
# format toml files
|
||||
taplo
|
||||
|
||||
# Needed for producing Debian packages
|
||||
cargo-deb
|
||||
|
||||
# Needed for our script for Complement
|
||||
jq
|
||||
|
||||
# Needed for Complement
|
||||
go
|
||||
olm
|
||||
|
||||
# Needed for our script for Complement
|
||||
jq
|
||||
|
||||
# Needed for finding broken markdown links
|
||||
lychee
|
||||
|
||||
# Useful for editing the book locally
|
||||
mdbook
|
||||
] ++ default.nativeBuildInputs ;
|
||||
}
|
rust-toolchain.toml (new file, 21 lines)
|
@ -0,0 +1,21 @@
|
|||
# This is the authoritative configuration of this project's Rust toolchain.
|
||||
#
|
||||
# Other files that need upkeep when this changes:
|
||||
#
|
||||
# * `Cargo.toml`
|
||||
# * `flake.nix`
|
||||
#
|
||||
# Search in those files for `rust-toolchain.toml` to find the relevant places.
|
||||
# If you're having trouble making the relevant changes, bug a maintainer.
|
||||
|
||||
[toolchain]
|
||||
channel = "1.79.0"
|
||||
components = [
|
||||
# For rust-analyzer
|
||||
"rust-src",
|
||||
]
|
||||
targets = [
|
||||
"aarch64-unknown-linux-musl",
|
||||
"x86_64-unknown-linux-gnu",
|
||||
"x86_64-unknown-linux-musl",
|
||||
]
|
|
@ -1,2 +1,2 @@
|
|||
unstable_features = true
|
||||
imports_granularity = "Crate"
|
||||
unstable_features = true
|
||||
|
|
|
@ -1,23 +1,34 @@
|
|||
use crate::{services, utils, Error, Result};
|
||||
use bytes::BytesMut;
|
||||
use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
|
||||
use ruma::api::{
|
||||
appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken,
|
||||
};
|
||||
use std::{fmt::Debug, mem, time::Duration};
|
||||
use tracing::warn;
|
||||
|
||||
/// Sends a request to an appservice
|
||||
///
|
||||
/// Only returns None if there is no url specified in the appservice registration file
|
||||
#[tracing::instrument(skip(request))]
|
||||
pub(crate) async fn send_request<T: OutgoingRequest>(
|
||||
registration: serde_yaml::Value,
|
||||
pub(crate) async fn send_request<T>(
|
||||
registration: Registration,
|
||||
request: T,
|
||||
) -> Result<T::IncomingResponse>
|
||||
) -> Result<Option<T::IncomingResponse>>
|
||||
where
|
||||
T: Debug,
|
||||
T: OutgoingRequest + Debug,
|
||||
{
|
||||
let destination = registration.get("url").unwrap().as_str().unwrap();
|
||||
let hs_token = registration.get("hs_token").unwrap().as_str().unwrap();
|
||||
let destination = match registration.url {
|
||||
Some(url) => url,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
let hs_token = registration.hs_token.as_str();
|
||||
|
||||
let mut http_request = request
|
||||
.try_into_http_request::<BytesMut>(
|
||||
destination,
|
||||
&destination,
|
||||
SendAccessToken::IfRequired(hs_token),
|
||||
&[MatrixVersion::V1_0],
|
||||
)
|
||||
|
@ -39,8 +50,7 @@ where
|
|||
);
|
||||
*http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");
|
||||
|
||||
let mut reqwest_request = reqwest::Request::try_from(http_request)
|
||||
.expect("all http requests are valid reqwest requests");
|
||||
let mut reqwest_request = reqwest::Request::try_from(http_request)?;
|
||||
|
||||
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
|
||||
|
||||
|
@ -55,9 +65,7 @@ where
|
|||
Err(e) => {
|
||||
warn!(
|
||||
"Could not send request to appservice {:?} at {}: {}",
|
||||
registration.get("id"),
|
||||
destination,
|
||||
e
|
||||
registration.id, destination, e
|
||||
);
|
||||
return Err(e.into());
|
||||
}
|
||||
|
@ -95,7 +103,8 @@ where
|
|||
.body(body)
|
||||
.expect("reqwest body is valid http body"),
|
||||
);
|
||||
response.map_err(|_| {
|
||||
|
||||
response.map(Some).map_err(|_| {
|
||||
warn!(
|
||||
"Appservice returned invalid response bytes {}\n{}",
|
||||
destination, url
|
||||
|
|
|
@ -3,7 +3,8 @@ use crate::{api::client_server, services, utils, Error, Result, Ruma};
|
|||
use ruma::{
|
||||
api::client::{
|
||||
account::{
|
||||
change_password, deactivate, get_3pids, get_username_availability, register,
|
||||
change_password, deactivate, get_3pids, get_username_availability,
|
||||
register::{self, LoginType},
|
||||
request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
|
||||
whoami, ThirdPartyIdRemovalStatus,
|
||||
},
|
||||
|
@ -74,12 +75,9 @@ pub async fn get_register_available_route(
|
|||
/// - Creates a new account and populates it with default account data
|
||||
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
|
||||
pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
|
||||
if !services().globals.allow_registration()
|
||||
&& !body.from_appservice
|
||||
&& services().globals.config.registration_token.is_none()
|
||||
{
|
||||
if !services().globals.allow_registration().await && body.appservice_info.is_none() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Registration has been disabled.",
|
||||
));
|
||||
}
|
||||
|
@ -121,22 +119,56 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
|||
},
|
||||
};
|
||||
|
||||
// UIAA
|
||||
let mut uiaainfo = UiaaInfo {
|
||||
flows: vec![AuthFlow {
|
||||
stages: if services().globals.config.registration_token.is_some() {
|
||||
vec![AuthType::RegistrationToken]
|
||||
if body.body.login_type == Some(LoginType::ApplicationService) {
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.is_user_match(&user_id) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else {
|
||||
vec![AuthType::Dummy]
|
||||
},
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::MissingToken,
|
||||
"Missing appservice token.",
|
||||
));
|
||||
}
|
||||
} else if services().appservice.is_exclusive_user_id(&user_id).await {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User id reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
// UIAA
|
||||
let mut uiaainfo;
|
||||
let skip_auth = if services().globals.config.registration_token.is_some() {
|
||||
// Registration token required
|
||||
uiaainfo = UiaaInfo {
|
||||
flows: vec![AuthFlow {
|
||||
stages: vec![AuthType::RegistrationToken],
|
||||
}],
|
||||
completed: Vec::new(),
|
||||
params: Default::default(),
|
||||
session: None,
|
||||
auth_error: None,
|
||||
};
|
||||
body.appservice_info.is_some()
|
||||
} else {
|
||||
// No registration token necessary, but clients must still go through the flow
|
||||
uiaainfo = UiaaInfo {
|
||||
flows: vec![AuthFlow {
|
||||
stages: vec![AuthType::Dummy],
|
||||
}],
|
||||
completed: Vec::new(),
|
||||
params: Default::default(),
|
||||
session: None,
|
||||
auth_error: None,
|
||||
};
|
||||
body.appservice_info.is_some() || is_guest
|
||||
};
|
||||
|
||||
if !body.from_appservice && !is_guest {
|
||||
if !skip_auth {
|
||||
if let Some(auth) = &body.auth {
|
||||
let (worked, uiaainfo) = services().uiaa.try_auth(
|
||||
&UserId::parse_with_server_name("", services().globals.server_name())
|
||||
|
@ -229,7 +261,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
|||
)?;
|
||||
|
||||
info!("New user {} registered on this server.", user_id);
|
||||
if !body.from_appservice && !is_guest {
|
||||
if body.appservice_info.is_none() && !is_guest {
|
||||
services()
|
||||
.admin
|
||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||
|
@ -239,7 +271,14 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
|||
|
||||
// If this is the first real user, grant them admin privileges
|
||||
// Note: the server user, @conduit:servername, is generated first
|
||||
if services().users.count()? == 2 {
|
||||
if !is_guest {
|
||||
if let Some(admin_room) = services().admin.get_admin_room()? {
|
||||
if services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_joined_count(&admin_room)?
|
||||
== Some(1)
|
||||
{
|
||||
services()
|
||||
.admin
|
||||
.make_user_admin(&user_id, displayname)
|
||||
|
@ -247,6 +286,8 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
|||
|
||||
warn!("Granting {} admin privileges as the first user", user_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(register::v3::Response {
|
||||
access_token: Some(token),
|
||||
|
@ -274,7 +315,11 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
|||
pub async fn change_password_route(
|
||||
body: Ruma<change_password::v3::Request>,
|
||||
) -> Result<change_password::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
let sender_user = body
|
||||
.sender_user
|
||||
.as_ref()
|
||||
// In the future password changes could be performed with UIA with 3PIDs, but we don't support that currently
|
||||
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
|
||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||
|
||||
let mut uiaainfo = UiaaInfo {
|
||||
|
@ -344,7 +389,7 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
|
|||
Ok(whoami::v3::Response {
|
||||
user_id: sender_user.clone(),
|
||||
device_id,
|
||||
is_guest: services().users.is_deactivated(sender_user)? && !body.from_appservice,
|
||||
is_guest: services().users.is_deactivated(sender_user)? && body.appservice_info.is_none(),
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -361,7 +406,11 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
|
|||
pub async fn deactivate_route(
|
||||
body: Ruma<deactivate::v3::Request>,
|
||||
) -> Result<deactivate::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
let sender_user = body
|
||||
.sender_user
|
||||
.as_ref()
|
||||
// In the future password changes could be performed with UIA with SSO, but we don't support that currently
|
||||
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
|
||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||
|
||||
let mut uiaainfo = UiaaInfo {
|
||||
|
@ -434,7 +483,7 @@ pub async fn request_3pid_management_token_via_email_route(
|
|||
) -> Result<request_3pid_management_token_via_email::v3::Response> {
|
||||
Err(Error::BadRequest(
|
||||
ErrorKind::ThreepidDenied,
|
||||
"Third party identifier is not allowed",
|
||||
"Third party identifiers are currently unsupported by this server implementation",
|
||||
))
|
||||
}
|
||||
|
||||
|
@ -448,6 +497,6 @@ pub async fn request_3pid_management_token_via_msisdn_route(
|
|||
) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
|
||||
Err(Error::BadRequest(
|
||||
ErrorKind::ThreepidDenied,
|
||||
"Third party identifier is not allowed",
|
||||
"Third party identifiers are currently unsupported by this server implementation",
|
||||
))
|
||||
}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
use crate::{services, Error, Result, Ruma};
|
||||
use rand::seq::SliceRandom;
|
||||
use regex::Regex;
|
||||
use ruma::{
|
||||
api::{
|
||||
appservice,
|
||||
|
@ -19,6 +18,8 @@ use ruma::{
|
|||
pub async fn create_alias_route(
|
||||
body: Ruma<create_alias::v3::Request>,
|
||||
) -> Result<create_alias::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
if body.room_alias.server_name() != services().globals.server_name() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
|
@ -26,6 +27,24 @@ pub async fn create_alias_route(
|
|||
));
|
||||
}
|
||||
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.aliases.is_match(body.room_alias.as_str()) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else if services()
|
||||
.appservice
|
||||
.is_exclusive_alias(&body.room_alias)
|
||||
.await
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
if services()
|
||||
.rooms
|
||||
.alias
|
||||
|
@ -38,7 +57,7 @@ pub async fn create_alias_route(
|
|||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.set_alias(&body.room_alias, &body.room_id)?;
|
||||
.set_alias(&body.room_alias, &body.room_id, sender_user)?;
|
||||
|
||||
Ok(create_alias::v3::Response::new())
|
||||
}
|
||||
|
@ -47,11 +66,12 @@ pub async fn create_alias_route(
|
|||
///
|
||||
/// Deletes a room alias from this server.
|
||||
///
|
||||
/// - TODO: additional access control checks
|
||||
/// - TODO: Update canonical alias event
|
||||
pub async fn delete_alias_route(
|
||||
body: Ruma<delete_alias::v3::Request>,
|
||||
) -> Result<delete_alias::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
if body.room_alias.server_name() != services().globals.server_name() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
|
@ -59,7 +79,28 @@ pub async fn delete_alias_route(
|
|||
));
|
||||
}
|
||||
|
||||
services().rooms.alias.remove_alias(&body.room_alias)?;
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.aliases.is_match(body.room_alias.as_str()) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else if services()
|
||||
.appservice
|
||||
.is_exclusive_alias(&body.room_alias)
|
||||
.await
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.remove_alias(&body.room_alias, sender_user)?;
|
||||
|
||||
// TODO: update alt_aliases?
|
||||
|
||||
|
@ -101,31 +142,20 @@ pub(crate) async fn get_alias_helper(
|
|||
match services().rooms.alias.resolve_local_alias(&room_alias)? {
|
||||
Some(r) => room_id = Some(r),
|
||||
None => {
|
||||
for (_id, registration) in services().appservice.all()? {
|
||||
let aliases = registration
|
||||
.get("namespaces")
|
||||
.and_then(|ns| ns.get("aliases"))
|
||||
.and_then(|aliases| aliases.as_sequence())
|
||||
.map_or_else(Vec::new, |aliases| {
|
||||
aliases
|
||||
.iter()
|
||||
.filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok())
|
||||
.collect::<Vec<_>>()
|
||||
});
|
||||
|
||||
if aliases
|
||||
.iter()
|
||||
.any(|aliases| aliases.is_match(room_alias.as_str()))
|
||||
&& services()
|
||||
for appservice in services().appservice.read().await.values() {
|
||||
if appservice.aliases.is_match(room_alias.as_str())
|
||||
&& matches!(
|
||||
services()
|
||||
.sending
|
||||
.send_appservice_request(
|
||||
registration,
|
||||
appservice.registration.clone(),
|
||||
appservice::query::query_room_alias::v1::Request {
|
||||
room_alias: room_alias.clone(),
|
||||
},
|
||||
)
|
||||
.await
|
||||
.is_ok()
|
||||
.await,
|
||||
Ok(Some(_opt_result))
|
||||
)
|
||||
{
|
||||
room_id = Some(
|
||||
services()
|
||||
|
|
|
@ -54,7 +54,7 @@ pub async fn get_context_route(
|
|||
.user_can_see_event(sender_user, &room_id, &body.event_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this event.",
|
||||
));
|
||||
}
|
||||
|
|
|
@ -53,7 +53,7 @@ pub async fn update_device_route(
|
|||
.get_device_metadata(sender_user, &body.device_id)?
|
||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
||||
|
||||
device.display_name = body.display_name.clone();
|
||||
device.display_name.clone_from(&body.display_name);
|
||||
|
||||
services()
|
||||
.users
|
||||
|
|
|
@ -339,17 +339,19 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
|||
|
||||
let mut failures = BTreeMap::new();
|
||||
|
||||
let back_off = |id| match services()
|
||||
let back_off = |id| async {
|
||||
match services()
|
||||
.globals
|
||||
.bad_query_ratelimiter
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(id)
|
||||
{
|
||||
hash_map::Entry::Vacant(e) => {
|
||||
e.insert((Instant::now(), 1));
|
||||
}
|
||||
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
||||
}
|
||||
};
|
||||
|
||||
let mut futures: FuturesUnordered<_> = get_over_federation
|
||||
|
@ -359,8 +361,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
|||
.globals
|
||||
.bad_query_ratelimiter
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&*server)
|
||||
.await
|
||||
.get(server)
|
||||
{
|
||||
// Exponential backoff
|
||||
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
|
||||
|
@ -393,7 +395,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
|||
),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Error::BadServerResponse("Query took too long")),
|
||||
.map_err(|_e| Error::BadServerResponse("Query took too long")),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
@ -428,7 +430,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
|||
device_keys.extend(response.device_keys);
|
||||
}
|
||||
_ => {
|
||||
back_off(server.to_owned());
|
||||
back_off(server.to_owned()).await;
|
||||
|
||||
failures.insert(server.to_string(), json!({}));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,12 +1,24 @@
|
|||
// Unauthenticated media is deprecated
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
|
||||
use ruma::api::client::{
|
||||
error::ErrorKind,
|
||||
media::{
|
||||
create_content, get_content, get_content_as_filename, get_content_thumbnail,
|
||||
get_media_config,
|
||||
use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE};
|
||||
use ruma::{
|
||||
api::{
|
||||
client::{
|
||||
authenticated_media::{
|
||||
get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
|
||||
},
|
||||
error::ErrorKind,
|
||||
media::{self, create_content},
|
||||
},
|
||||
federation::authenticated_media::{self as federation_media, FileOrLocation},
|
||||
},
|
||||
http_headers::{ContentDisposition, ContentDispositionType},
|
||||
media::Method,
|
||||
ServerName, UInt,
|
||||
};
|
||||
|
||||
const MXC_LENGTH: usize = 32;
|
||||
|
@ -15,9 +27,20 @@ const MXC_LENGTH: usize = 32;
|
|||
///
|
||||
/// Returns max upload size.
|
||||
pub async fn get_media_config_route(
|
||||
_body: Ruma<get_media_config::v3::Request>,
|
||||
) -> Result<get_media_config::v3::Response> {
|
||||
Ok(get_media_config::v3::Response {
|
||||
_body: Ruma<media::get_media_config::v3::Request>,
|
||||
) -> Result<media::get_media_config::v3::Response> {
|
||||
Ok(media::get_media_config::v3::Response {
|
||||
upload_size: services().globals.max_request_size().into(),
|
||||
})
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/config`
|
||||
///
|
||||
/// Returns max upload size.
|
||||
pub async fn get_media_config_auth_route(
|
||||
_body: Ruma<get_media_config::v1::Request>,
|
||||
) -> Result<get_media_config::v1::Response> {
|
||||
Ok(get_media_config::v1::Response {
|
||||
upload_size: services().globals.max_request_size().into(),
|
||||
})
|
||||
}
|
||||
|
@ -41,45 +64,84 @@ pub async fn create_content_route(
|
|||
.media
|
||||
.create(
|
||||
mxc.clone(),
|
||||
body.filename
|
||||
.as_ref()
|
||||
.map(|filename| "inline; filename=".to_owned() + filename)
|
||||
.as_deref(),
|
||||
Some(
|
||||
ContentDisposition::new(ContentDispositionType::Inline)
|
||||
.with_filename(body.filename.clone()),
|
||||
),
|
||||
body.content_type.as_deref(),
|
||||
&body.file,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(create_content::v3::Response {
|
||||
content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
|
||||
content_uri: mxc.into(),
|
||||
blurhash: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn get_remote_content(
|
||||
mxc: &str,
|
||||
server_name: &ruma::ServerName,
|
||||
server_name: &ServerName,
|
||||
media_id: String,
|
||||
) -> Result<get_content::v3::Response, Error> {
|
||||
let content_response = services()
|
||||
) -> Result<get_content::v1::Response, Error> {
|
||||
let content_response = match services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
server_name,
|
||||
get_content::v3::Request {
|
||||
allow_remote: false,
|
||||
federation_media::get_content::v1::Request {
|
||||
media_id: media_id.clone(),
|
||||
timeout_ms: Duration::from_secs(20),
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(federation_media::get_content::v1::Response {
|
||||
metadata: _,
|
||||
content: FileOrLocation::File(content),
|
||||
}) => get_content::v1::Response {
|
||||
file: content.file,
|
||||
content_type: content.content_type,
|
||||
content_disposition: content.content_disposition,
|
||||
},
|
||||
|
||||
Ok(federation_media::get_content::v1::Response {
|
||||
metadata: _,
|
||||
content: FileOrLocation::Location(url),
|
||||
}) => get_location_content(url).await?,
|
||||
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
|
||||
let media::get_content::v3::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
..
|
||||
} = services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
server_name,
|
||||
media::get_content::v3::Request {
|
||||
server_name: server_name.to_owned(),
|
||||
media_id,
|
||||
timeout_ms: Duration::from_secs(20),
|
||||
allow_redirect: false,
|
||||
allow_remote: false,
|
||||
allow_redirect: true,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
get_content::v1::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
}
|
||||
}
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
|
||||
services()
|
||||
.media
|
||||
.create(
|
||||
mxc.to_owned(),
|
||||
content_response.content_disposition.as_deref(),
|
||||
content_response.content_disposition.clone(),
|
||||
content_response.content_type.as_deref(),
|
||||
&content_response.file,
|
||||
)
|
||||
|
@ -94,26 +156,58 @@ pub async fn get_remote_content(
|
|||
///
|
||||
/// - Only allows federation if `allow_remote` is true
|
||||
pub async fn get_content_route(
|
||||
body: Ruma<get_content::v3::Request>,
|
||||
) -> Result<get_content::v3::Response> {
|
||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||
|
||||
if let Some(FileMeta {
|
||||
body: Ruma<media::get_content::v3::Request>,
|
||||
) -> Result<media::get_content::v3::Response> {
|
||||
let get_content::v1::Response {
|
||||
file,
|
||||
content_disposition,
|
||||
content_type,
|
||||
file,
|
||||
}) = services().media.get(mxc.clone()).await?
|
||||
{
|
||||
Ok(get_content::v3::Response {
|
||||
} = get_content(&body.server_name, body.media_id.clone(), body.allow_remote).await?;
|
||||
|
||||
Ok(media::get_content::v3::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
})
|
||||
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
|
||||
///
|
||||
/// Load media from our server or over federation.
|
||||
pub async fn get_content_auth_route(
|
||||
body: Ruma<get_content::v1::Request>,
|
||||
) -> Result<get_content::v1::Response> {
|
||||
get_content(&body.server_name, body.media_id.clone(), true).await
|
||||
}
|
||||
|
||||
async fn get_content(
|
||||
server_name: &ServerName,
|
||||
media_id: String,
|
||||
allow_remote: bool,
|
||||
) -> Result<get_content::v1::Response, Error> {
|
||||
let mxc = format!("mxc://{}/{}", server_name, media_id);
|
||||
|
||||
if let Ok(Some(FileMeta {
|
||||
content_disposition,
|
||||
content_type,
|
||||
file,
|
||||
})) = services().media.get(mxc.clone()).await
|
||||
{
|
||||
Ok(get_content::v1::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition: Some(content_disposition),
|
||||
})
|
||||
} else if server_name != services().globals.server_name() && allow_remote {
|
||||
let remote_content_response =
|
||||
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
|
||||
Ok(remote_content_response)
|
||||
get_remote_content(&mxc, server_name, media_id.clone()).await?;
|
||||
|
||||
Ok(get_content::v1::Response {
|
||||
content_disposition: remote_content_response.content_disposition,
|
||||
content_type: remote_content_response.content_type,
|
||||
file: remote_content_response.file,
|
||||
})
|
||||
} else {
|
||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||
}
|
||||
|
@ -125,31 +219,74 @@ pub async fn get_content_route(
|
|||
///
|
||||
/// - Only allows federation if `allow_remote` is true
|
||||
pub async fn get_content_as_filename_route(
|
||||
body: Ruma<get_content_as_filename::v3::Request>,
|
||||
) -> Result<get_content_as_filename::v3::Response> {
|
||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||
body: Ruma<media::get_content_as_filename::v3::Request>,
|
||||
) -> Result<media::get_content_as_filename::v3::Response> {
|
||||
let get_content_as_filename::v1::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
} = get_content_as_filename(
|
||||
&body.server_name,
|
||||
body.media_id.clone(),
|
||||
body.filename.clone(),
|
||||
body.allow_remote,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if let Some(FileMeta {
|
||||
content_disposition: _,
|
||||
content_type,
|
||||
file,
|
||||
}) = services().media.get(mxc.clone()).await?
|
||||
{
|
||||
Ok(get_content_as_filename::v3::Response {
|
||||
Ok(media::get_content_as_filename::v3::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition: Some(format!("inline; filename={}", body.filename)),
|
||||
content_disposition,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
})
|
||||
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||
let remote_content_response =
|
||||
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
|
||||
}
|
||||
|
||||
Ok(get_content_as_filename::v3::Response {
|
||||
content_disposition: Some(format!("inline: filename={}", body.filename)),
|
||||
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
|
||||
///
|
||||
/// Load media from our server or over federation, permitting desired filename.
|
||||
pub async fn get_content_as_filename_auth_route(
|
||||
body: Ruma<get_content_as_filename::v1::Request>,
|
||||
) -> Result<get_content_as_filename::v1::Response, Error> {
|
||||
get_content_as_filename(
|
||||
&body.server_name,
|
||||
body.media_id.clone(),
|
||||
body.filename.clone(),
|
||||
true,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_content_as_filename(
|
||||
server_name: &ServerName,
|
||||
media_id: String,
|
||||
filename: String,
|
||||
allow_remote: bool,
|
||||
) -> Result<get_content_as_filename::v1::Response, Error> {
|
||||
let mxc = format!("mxc://{}/{}", server_name, media_id);
|
||||
|
||||
if let Ok(Some(FileMeta {
|
||||
file, content_type, ..
|
||||
})) = services().media.get(mxc.clone()).await
|
||||
{
|
||||
Ok(get_content_as_filename::v1::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition: Some(
|
||||
ContentDisposition::new(ContentDispositionType::Inline)
|
||||
.with_filename(Some(filename.clone())),
|
||||
),
|
||||
})
|
||||
} else if server_name != services().globals.server_name() && allow_remote {
|
||||
let remote_content_response =
|
||||
get_remote_content(&mxc, server_name, media_id.clone()).await?;
|
||||
|
||||
Ok(get_content_as_filename::v1::Response {
|
||||
content_disposition: Some(
|
||||
ContentDisposition::new(ContentDispositionType::Inline)
|
||||
.with_filename(Some(filename.clone())),
|
||||
),
|
||||
content_type: remote_content_response.content_type,
|
||||
file: remote_content_response.file,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
})
|
||||
} else {
|
||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||
|
@ -162,62 +299,169 @@ pub async fn get_content_as_filename_route(
|
|||
///
|
||||
/// - Only allows federation if `allow_remote` is true
|
||||
pub async fn get_content_thumbnail_route(
|
||||
body: Ruma<get_content_thumbnail::v3::Request>,
|
||||
) -> Result<get_content_thumbnail::v3::Response> {
|
||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||
|
||||
if let Some(FileMeta {
|
||||
content_type, file, ..
|
||||
}) = services()
|
||||
.media
|
||||
.get_thumbnail(
|
||||
mxc.clone(),
|
||||
body.width
|
||||
.try_into()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
||||
body.height
|
||||
.try_into()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
||||
body: Ruma<media::get_content_thumbnail::v3::Request>,
|
||||
) -> Result<media::get_content_thumbnail::v3::Response> {
|
||||
let get_content_thumbnail::v1::Response { file, content_type } = get_content_thumbnail(
|
||||
&body.server_name,
|
||||
body.media_id.clone(),
|
||||
body.height,
|
||||
body.width,
|
||||
body.method.clone(),
|
||||
body.animated,
|
||||
body.allow_remote,
|
||||
)
|
||||
.await?
|
||||
{
|
||||
Ok(get_content_thumbnail::v3::Response {
|
||||
.await?;
|
||||
|
||||
Ok(media::get_content_thumbnail::v3::Response {
|
||||
file,
|
||||
content_type,
|
||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||
})
|
||||
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||
let get_thumbnail_response = services()
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
|
||||
///
|
||||
/// Load media thumbnail from our server or over federation.
|
||||
pub async fn get_content_thumbnail_auth_route(
|
||||
body: Ruma<get_content_thumbnail::v1::Request>,
|
||||
) -> Result<get_content_thumbnail::v1::Response> {
|
||||
get_content_thumbnail(
|
||||
&body.server_name,
|
||||
body.media_id.clone(),
|
||||
body.height,
|
||||
body.width,
|
||||
body.method.clone(),
|
||||
body.animated,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_content_thumbnail(
|
||||
server_name: &ServerName,
|
||||
media_id: String,
|
||||
height: UInt,
|
||||
width: UInt,
|
||||
method: Option<Method>,
|
||||
animated: Option<bool>,
|
||||
allow_remote: bool,
|
||||
) -> Result<get_content_thumbnail::v1::Response, Error> {
|
||||
let mxc = format!("mxc://{}/{}", server_name, media_id);
|
||||
|
||||
if let Ok(Some(FileMeta {
|
||||
file, content_type, ..
|
||||
})) = services()
|
||||
.media
|
||||
.get_thumbnail(
|
||||
mxc.clone(),
|
||||
width
|
||||
.try_into()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
||||
height
|
||||
.try_into()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(get_content_thumbnail::v1::Response { file, content_type })
|
||||
} else if server_name != services().globals.server_name() && allow_remote {
|
||||
let thumbnail_response = match services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
&body.server_name,
|
||||
get_content_thumbnail::v3::Request {
|
||||
allow_remote: false,
|
||||
height: body.height,
|
||||
width: body.width,
|
||||
method: body.method.clone(),
|
||||
server_name: body.server_name.clone(),
|
||||
media_id: body.media_id.clone(),
|
||||
server_name,
|
||||
federation_media::get_content_thumbnail::v1::Request {
|
||||
height,
|
||||
width,
|
||||
method: method.clone(),
|
||||
media_id: media_id.clone(),
|
||||
timeout_ms: Duration::from_secs(20),
|
||||
animated,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(federation_media::get_content_thumbnail::v1::Response {
|
||||
metadata: _,
|
||||
content: FileOrLocation::File(content),
|
||||
}) => get_content_thumbnail::v1::Response {
|
||||
file: content.file,
|
||||
content_type: content.content_type,
|
||||
},
|
||||
|
||||
Ok(federation_media::get_content_thumbnail::v1::Response {
|
||||
metadata: _,
|
||||
content: FileOrLocation::Location(url),
|
||||
}) => {
|
||||
let get_content::v1::Response {
|
||||
file, content_type, ..
|
||||
} = get_location_content(url).await?;
|
||||
|
||||
get_content_thumbnail::v1::Response { file, content_type }
|
||||
}
|
||||
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
|
||||
let media::get_content_thumbnail::v3::Response {
|
||||
file, content_type, ..
|
||||
} = services()
|
||||
.sending
|
||||
.send_federation_request(
|
||||
server_name,
|
||||
media::get_content_thumbnail::v3::Request {
|
||||
height,
|
||||
width,
|
||||
method: method.clone(),
|
||||
server_name: server_name.to_owned(),
|
||||
media_id: media_id.clone(),
|
||||
timeout_ms: Duration::from_secs(20),
|
||||
allow_redirect: false,
|
||||
animated,
|
||||
allow_remote: false,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
get_content_thumbnail::v1::Response { file, content_type }
|
||||
}
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
|
||||
services()
|
||||
.media
|
||||
.upload_thumbnail(
|
||||
mxc,
|
||||
None,
|
||||
get_thumbnail_response.content_type.as_deref(),
|
||||
body.width.try_into().expect("all UInts are valid u32s"),
|
||||
body.height.try_into().expect("all UInts are valid u32s"),
|
||||
&get_thumbnail_response.file,
|
||||
thumbnail_response.content_type.as_deref(),
|
||||
width.try_into().expect("all UInts are valid u32s"),
|
||||
height.try_into().expect("all UInts are valid u32s"),
|
||||
&thumbnail_response.file,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(get_thumbnail_response)
|
||||
Ok(thumbnail_response)
|
||||
} else {
|
||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_location_content(url: String) -> Result<get_content::v1::Response, Error> {
|
||||
let client = services().globals.default_client();
|
||||
let response = client.get(url).send().await?;
|
||||
let headers = response.headers();
|
||||
|
||||
let content_type = headers
|
||||
.get(CONTENT_TYPE)
|
||||
.and_then(|header| header.to_str().ok())
|
||||
.map(ToOwned::to_owned);
|
||||
|
||||
let content_disposition = headers
|
||||
.get(CONTENT_DISPOSITION)
|
||||
.map(|header| header.as_bytes())
|
||||
.map(TryFrom::try_from)
|
||||
.and_then(Result::ok);
|
||||
|
||||
let file = response.bytes().await?.to_vec();
|
||||
|
||||
Ok(get_content::v1::Response {
|
||||
file,
|
||||
content_type,
|
||||
content_disposition,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -15,24 +15,26 @@ use ruma::{
|
|||
room::{
|
||||
join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent},
|
||||
member::{MembershipState, RoomMemberEventContent},
|
||||
power_levels::RoomPowerLevelsEventContent,
|
||||
},
|
||||
StateEventType, TimelineEventType,
|
||||
},
|
||||
serde::Base64,
|
||||
state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
|
||||
OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId,
|
||||
state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
|
||||
OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId,
|
||||
};
|
||||
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
|
||||
use std::{
|
||||
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
|
||||
sync::{Arc, RwLock},
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use crate::{
|
||||
service::pdu::{gen_event_id_canonical_json, PduBuilder},
|
||||
service::{
|
||||
globals::SigningKeys,
|
||||
pdu::{gen_event_id_canonical_json, PduBuilder},
|
||||
},
|
||||
services, utils, Error, PduEvent, Result, Ruma,
|
||||
};
|
||||
|
||||
|
@ -64,7 +66,12 @@ pub async fn join_room_by_id_route(
|
|||
.map(|user| user.server_name().to_owned()),
|
||||
);
|
||||
|
||||
servers.push(body.room_id.server_name().to_owned());
|
||||
servers.push(
|
||||
body.room_id
|
||||
.server_name()
|
||||
.expect("Room IDs should always have a server name")
|
||||
.into(),
|
||||
);
|
||||
|
||||
join_room_by_id_helper(
|
||||
body.sender_user.as_deref(),
|
||||
|
@ -90,7 +97,7 @@ pub async fn join_room_by_id_or_alias_route(
|
|||
|
||||
let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) {
|
||||
Ok(room_id) => {
|
||||
let mut servers = body.server_name.clone();
|
||||
let mut servers = body.via.clone();
|
||||
servers.extend(
|
||||
services()
|
||||
.rooms
|
||||
|
@ -105,7 +112,12 @@ pub async fn join_room_by_id_or_alias_route(
|
|||
.map(|user| user.server_name().to_owned()),
|
||||
);
|
||||
|
||||
servers.push(room_id.server_name().to_owned());
|
||||
servers.push(
|
||||
room_id
|
||||
.server_name()
|
||||
.expect("Room IDs should always have a server name")
|
||||
.into(),
|
||||
);
|
||||
|
||||
(servers, room_id)
|
||||
}
|
||||
|
@ -176,7 +188,7 @@ pub async fn kick_user_route(
|
|||
) -> Result<kick_user::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let mut event: RoomMemberEventContent = serde_json::from_str(
|
||||
let event: RoomMemberEventContent = serde_json::from_str(
|
||||
services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
|
@ -187,39 +199,55 @@ pub async fn kick_user_route(
|
|||
)?
|
||||
.ok_or(Error::BadRequest(
|
||||
ErrorKind::BadState,
|
||||
"Cannot kick member that's not in the room.",
|
||||
"Cannot kick a user who is not in the room.",
|
||||
))?
|
||||
.content
|
||||
.get(),
|
||||
)
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
|
||||
|
||||
event.membership = MembershipState::Leave;
|
||||
event.reason = body.reason.clone();
|
||||
// If they are already kicked and the reason is unchanged, there isn't any point in sending a new event.
|
||||
if event.membership == MembershipState::Leave && event.reason == body.reason {
|
||||
return Ok(kick_user::v3::Response {});
|
||||
}
|
||||
|
||||
let event = RoomMemberEventContent {
|
||||
is_direct: None,
|
||||
membership: MembershipState::Leave,
|
||||
third_party_invite: None,
|
||||
reason: body.reason.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
..event
|
||||
};
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(body.room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some(body.user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
drop(state_lock);
|
||||
|
||||
|
@ -232,7 +260,7 @@ pub async fn kick_user_route(
|
|||
pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_user::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let event = services()
|
||||
let event = if let Some(event) = services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get(
|
||||
|
@ -240,50 +268,59 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
|
|||
&StateEventType::RoomMember,
|
||||
body.user_id.as_ref(),
|
||||
)?
|
||||
.map_or(
|
||||
Ok(RoomMemberEventContent {
|
||||
// Even when the previous member content is invalid, we should let the ban go through anyways.
|
||||
.and_then(|event| serde_json::from_str::<RoomMemberEventContent>(event.content.get()).ok())
|
||||
{
|
||||
// If they are already banned and the reason is unchanged, there isn't any point in sending a new event.
|
||||
if event.membership == MembershipState::Ban && event.reason == body.reason {
|
||||
return Ok(ban_user::v3::Response {});
|
||||
}
|
||||
|
||||
RoomMemberEventContent {
|
||||
membership: MembershipState::Ban,
|
||||
displayname: services().users.displayname(&body.user_id)?,
|
||||
avatar_url: services().users.avatar_url(&body.user_id)?,
|
||||
is_direct: None,
|
||||
third_party_invite: None,
|
||||
blurhash: services().users.blurhash(&body.user_id)?,
|
||||
reason: body.reason.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
}),
|
||||
|event| {
|
||||
serde_json::from_str(event.content.get())
|
||||
.map(|event: RoomMemberEventContent| RoomMemberEventContent {
|
||||
membership: MembershipState::Ban,
|
||||
..event
|
||||
})
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))
|
||||
},
|
||||
)?;
|
||||
reason: body.reason.clone(),
|
||||
third_party_invite: None,
|
||||
is_direct: None,
|
||||
avatar_url: event.avatar_url,
|
||||
displayname: event.displayname,
|
||||
blurhash: event.blurhash,
|
||||
}
|
||||
} else {
|
||||
RoomMemberEventContent {
|
||||
reason: body.reason.clone(),
|
||||
..RoomMemberEventContent::new(MembershipState::Ban)
|
||||
}
|
||||
};
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(body.room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some(body.user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
drop(state_lock);
|
||||
|
||||
|
@ -298,7 +335,7 @@ pub async fn unban_user_route(
|
|||
) -> Result<unban_user::v3::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let mut event: RoomMemberEventContent = serde_json::from_str(
|
||||
let event: RoomMemberEventContent = serde_json::from_str(
|
||||
services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
|
@ -316,32 +353,48 @@ pub async fn unban_user_route(
|
|||
)
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
|
||||
|
||||
event.membership = MembershipState::Leave;
|
||||
event.reason = body.reason.clone();
|
||||
// If they are already unbanned and the reason is unchanged, there isn't any point in sending a new event.
|
||||
if event.membership == MembershipState::Leave && event.reason == body.reason {
|
||||
return Ok(unban_user::v3::Response {});
|
||||
}
|
||||
|
||||
let event = RoomMemberEventContent {
|
||||
is_direct: None,
|
||||
membership: MembershipState::Leave,
|
||||
third_party_invite: None,
|
||||
reason: body.reason.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
..event
|
||||
};
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(body.room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some(body.user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
drop(state_lock);
|
||||
|
||||
|
@@ -400,10 +453,10 @@ pub async fn get_member_events_route(
if !services()
.rooms
.state_accessor
.user_can_see_state_events(&sender_user, &body.room_id)?
.user_can_see_state_events(sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
ErrorKind::forbidden(),
"You don't have permission to view this room.",
));
}

@@ -435,10 +488,10 @@ pub async fn joined_members_route(
if !services()
.rooms
.state_accessor
.user_can_see_state_events(&sender_user, &body.room_id)?
.user_can_see_state_events(sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
ErrorKind::forbidden(),
"You don't have permission to view this room.",
));
}
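The `ErrorKind::Forbidden` → `ErrorKind::forbidden()` swap seen here, and repeated throughout the rest of this diff, tracks ruma replacing the plain `Forbidden` variant with an `ErrorKind::forbidden()` constructor; dropping the extra `&` on `sender_user` is only a lint-level cleanup. For reference, the new error form as used in these hunks:

```rust
// New ruma error constructor used throughout this diff; message text taken
// from the hunk above.
return Err(Error::BadRequest(
    ErrorKind::forbidden(),
    "You don't have permission to view this room.",
));
```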
@ -474,12 +527,18 @@ async fn join_room_by_id_helper(
|
|||
) -> Result<join_room_by_id::v3::Response> {
|
||||
let sender_user = sender_user.expect("user is authenticated");
|
||||
|
||||
if let Ok(true) = services().rooms.state_cache.is_joined(sender_user, room_id) {
|
||||
return Ok(join_room_by_id::v3::Response {
|
||||
room_id: room_id.into(),
|
||||
});
|
||||
}
|
||||
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
|
@ -568,7 +627,7 @@ async fn join_room_by_id_helper(
|
|||
let event_id = format!(
|
||||
"${}",
|
||||
ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
|
||||
.expect("ruma can calculate reference hashes")
|
||||
.expect("Event format validated when event was hashed")
|
||||
);
|
||||
let event_id = <&EventId>::try_from(event_id.as_str())
|
||||
.expect("ruma's reference hashes are valid event ids");
|
||||
|
@ -619,7 +678,7 @@ async fn join_room_by_id_helper(
|
|||
));
|
||||
}
|
||||
|
||||
if let Ok(signature) = signed_value["signatures"]
|
||||
match signed_value["signatures"]
|
||||
.as_object()
|
||||
.ok_or(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
|
@ -630,20 +689,22 @@ async fn join_room_by_id_helper(
|
|||
ErrorKind::InvalidParam,
|
||||
"Server did not send its signature",
|
||||
))
|
||||
})
|
||||
{
|
||||
}) {
|
||||
Ok(signature) => {
|
||||
join_event
|
||||
.get_mut("signatures")
|
||||
.expect("we created a valid pdu")
|
||||
.as_object_mut()
|
||||
.expect("we created a valid pdu")
|
||||
.insert(remote_server.to_string(), signature.clone());
|
||||
} else {
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}",
|
||||
"Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}",
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
services().rooms.short.get_or_create_shortroomid(room_id)?;
|
||||
|
||||
|
@ -668,7 +729,7 @@ async fn join_room_by_id_helper(
|
|||
.iter()
|
||||
.map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
|
||||
{
|
||||
let (event_id, value) = match result {
|
||||
let (event_id, value) = match result.await {
|
||||
Ok(t) => t,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
@ -698,7 +759,7 @@ async fn join_room_by_id_helper(
|
|||
.iter()
|
||||
.map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
|
||||
{
|
||||
let (event_id, value) = match result {
|
||||
let (event_id, value) = match result.await {
|
||||
Ok(t) => t,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
@ -710,7 +771,7 @@ async fn join_room_by_id_helper(
|
|||
}
|
||||
|
||||
info!("Running send_join auth check");
|
||||
if !state_res::event_auth::auth_check(
|
||||
let authenticated = state_res::event_auth::auth_check(
|
||||
&state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
|
||||
&parsed_join_pdu,
|
||||
None::<PduEvent>, // TODO: third party invite
|
||||
|
@ -733,7 +794,9 @@ async fn join_room_by_id_helper(
|
|||
.map_err(|e| {
|
||||
warn!("Auth check failed: {e}");
|
||||
Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
|
||||
})? {
|
||||
})?;
|
||||
|
||||
if !authenticated {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"Auth check failed",
|
||||
|
@ -770,12 +833,16 @@ async fn join_room_by_id_helper(
|
|||
let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?;
|
||||
|
||||
info!("Appending new room join event");
|
||||
services().rooms.timeline.append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.append_pdu(
|
||||
&parsed_join_pdu,
|
||||
join_event,
|
||||
vec![(*parsed_join_pdu.event_id).to_owned()],
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
info!("Setting final room state for new room");
|
||||
// We set the room state after inserting the pdu, so that we never have a moment in time
|
||||
|
@ -792,11 +859,6 @@ async fn join_room_by_id_helper(
|
|||
&StateEventType::RoomJoinRules,
|
||||
"",
|
||||
)?;
|
||||
let power_levels_event = services().rooms.state_accessor.room_state_get(
|
||||
room_id,
|
||||
&StateEventType::RoomPowerLevels,
|
||||
"",
|
||||
)?;
|
||||
|
||||
let join_rules_event_content: Option<RoomJoinRulesEventContent> = join_rules_event
|
||||
.as_ref()
|
||||
|
@ -807,15 +869,6 @@ async fn join_room_by_id_helper(
|
|||
})
|
||||
})
|
||||
.transpose()?;
|
||||
let power_levels_event_content: Option<RoomPowerLevelsEventContent> = power_levels_event
|
||||
.as_ref()
|
||||
.map(|power_levels_event| {
|
||||
serde_json::from_str(power_levels_event.content.get()).map_err(|e| {
|
||||
warn!("Invalid power levels event: {}", e);
|
||||
Error::bad_database("Invalid power levels event in db.")
|
||||
})
|
||||
})
|
||||
.transpose()?;
|
||||
|
||||
let restriction_rooms = match join_rules_event_content {
|
||||
Some(RoomJoinRulesEventContent {
|
||||
|
@ -834,47 +887,37 @@ async fn join_room_by_id_helper(
|
|||
_ => Vec::new(),
|
||||
};
|
||||
|
||||
let authorized_user = restriction_rooms
|
||||
.iter()
|
||||
.find_map(|restriction_room_id| {
|
||||
if !services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(sender_user, restriction_room_id)
|
||||
.ok()?
|
||||
{
|
||||
return None;
|
||||
}
|
||||
let authorized_user = power_levels_event_content
|
||||
.as_ref()
|
||||
.and_then(|c| {
|
||||
c.users
|
||||
.iter()
|
||||
.filter(|(uid, i)| {
|
||||
uid.server_name() == services().globals.server_name()
|
||||
&& **i > ruma::int!(0)
|
||||
&& services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(uid, restriction_room_id)
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.max_by_key(|(_, i)| *i)
|
||||
.map(|(u, _)| u.to_owned())
|
||||
})
|
||||
.or_else(|| {
|
||||
// TODO: Check here if user is actually allowed to invite. Currently the auth
|
||||
// check will just fail in this case.
|
||||
let authorized_user = if restriction_rooms.iter().any(|restriction_room_id| {
|
||||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(restriction_room_id)
|
||||
.filter_map(|r| r.ok())
|
||||
.find(|uid| uid.server_name() == services().globals.server_name())
|
||||
});
|
||||
Some(authorized_user)
|
||||
})
|
||||
.flatten();
|
||||
.is_joined(sender_user, restriction_room_id)
|
||||
.unwrap_or(false)
|
||||
}) {
|
||||
let mut auth_user = None;
|
||||
for user in services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(room_id)
|
||||
.filter_map(Result::ok)
|
||||
.collect::<Vec<_>>()
|
||||
{
|
||||
if user.server_name() == services().globals.server_name()
|
||||
&& services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.user_can_invite(room_id, &user, sender_user, &state_lock)
|
||||
.await
|
||||
.unwrap_or(false)
|
||||
{
|
||||
auth_user = Some(user);
|
||||
break;
|
||||
}
|
||||
}
|
||||
auth_user
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let event = RoomMemberEventContent {
|
||||
membership: MembershipState::Join,
|
||||
|
@ -888,18 +931,24 @@ async fn join_room_by_id_helper(
|
|||
};
|
||||
|
||||
// Try normal join first
|
||||
let error = match services().rooms.timeline.build_and_append_pdu(
|
||||
let error = match services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
room_id,
|
||||
&state_lock,
|
||||
) {
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())),
|
||||
Err(e) => e,
|
||||
};
|
||||
|
@ -907,9 +956,7 @@ async fn join_room_by_id_helper(
|
|||
if !restriction_rooms.is_empty()
|
||||
&& servers
|
||||
.iter()
|
||||
.filter(|s| *s != services().globals.server_name())
|
||||
.count()
|
||||
> 0
|
||||
.any(|s| *s != services().globals.server_name())
|
||||
{
|
||||
info!(
|
||||
"We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements"
|
||||
|
@ -940,6 +987,8 @@ async fn join_room_by_id_helper(
|
|||
.as_str()
|
||||
})
|
||||
.and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok());
|
||||
let restricted_join = join_authorized_via_users_server.is_some();
|
||||
|
||||
// TODO: Is origin needed?
|
||||
join_event_stub.insert(
|
||||
"origin".to_owned(),
|
||||
|
@ -986,7 +1035,7 @@ async fn join_room_by_id_helper(
|
|||
ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
|
||||
.expect("ruma can calculate reference hashes")
|
||||
);
|
||||
let event_id = <&EventId>::try_from(event_id.as_str())
|
||||
let event_id = OwnedEventId::try_from(event_id)
|
||||
.expect("ruma's reference hashes are valid event ids");
|
||||
|
||||
// Add event_id back
|
||||
|
@ -1011,46 +1060,35 @@ async fn join_room_by_id_helper(
|
|||
)
|
||||
.await?;
|
||||
|
||||
if let Some(signed_raw) = send_join_response.room_state.event {
|
||||
let (signed_event_id, signed_value) =
|
||||
match gen_event_id_canonical_json(&signed_raw, &room_version_id) {
|
||||
Ok(t) => t,
|
||||
Err(_) => {
|
||||
// Event could not be converted to canonical json
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"Could not convert event to canonical json.",
|
||||
));
|
||||
}
|
||||
};
|
||||
let pdu = if let Some(signed_raw) = send_join_response.room_state.event {
|
||||
let (signed_event_id, signed_pdu) =
|
||||
gen_event_id_canonical_json(&signed_raw, &room_version_id)?;
|
||||
|
||||
if signed_event_id != event_id {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
return Err(Error::BadServerResponse(
|
||||
"Server sent event with wrong event id",
|
||||
));
|
||||
}
|
||||
|
||||
signed_pdu
|
||||
} else if restricted_join {
|
||||
return Err(Error::BadServerResponse(
|
||||
"No signed event was returned, despite just performing a restricted join",
|
||||
));
|
||||
} else {
|
||||
join_event
|
||||
};
|
||||
|
||||
drop(state_lock);
|
||||
let pub_key_map = RwLock::new(BTreeMap::new());
|
||||
services()
|
||||
.rooms
|
||||
.event_handler
|
||||
.handle_incoming_pdu(
|
||||
&remote_server,
|
||||
&signed_event_id,
|
||||
room_id,
|
||||
signed_value,
|
||||
true,
|
||||
&pub_key_map,
|
||||
)
|
||||
.handle_incoming_pdu(&remote_server, &event_id, room_id, pdu, true, &pub_key_map)
|
||||
.await?;
|
||||
} else {
|
||||
return Err(error);
|
||||
}
|
||||
} else {
|
||||
return Err(error);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
|
||||
|
@@ -1095,10 +1133,10 @@ async fn make_join_request(
make_join_response_and_server
}

fn validate_and_add_event_id(
async fn validate_and_add_event_id(
pdu: &RawJsonValue,
room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
) -> Result<(OwnedEventId, CanonicalJsonObject)> {
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);

@@ -1107,28 +1145,30 @@ fn validate_and_add_event_id(
let event_id = EventId::parse(format!(
"${}",
ruma::signatures::reference_hash(&value, room_version)
.expect("ruma can calculate reference hashes")
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))?
))
.expect("ruma's reference hashes are valid event ids");

let back_off = |id| match services()
let back_off = |id| async {
match services()
.globals
.bad_event_ratelimiter
.write()
.unwrap()
.await
.entry(id)
{
Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
}
};

if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.await
.get(&event_id)
{
// Exponential backoff

@@ -1143,15 +1183,37 @@ fn validate_and_add_event_id(
}
}

if let Err(e) = ruma::signatures::verify_event(
&*pub_key_map
.read()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?,
&value,
room_version,
) {
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
error!("Invalid PDU, no origin_server_ts field");
Error::BadRequest(
ErrorKind::MissingParam,
"Invalid PDU, no origin_server_ts field",
)
})?;

let origin_server_ts: MilliSecondsSinceUnixEpoch = {
let ts = origin_server_ts.as_integer().ok_or_else(|| {
Error::BadRequest(
ErrorKind::InvalidParam,
"origin_server_ts must be an integer",
)
})?;

MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
})?)
};

let unfiltered_keys = (*pub_key_map.read().await).clone();

let keys =
services()
.globals
.filter_keys_server_map(unfiltered_keys, origin_server_ts, room_version);

if let Err(e) = ruma::signatures::verify_event(&keys, &value, room_version) {
warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
back_off(event_id);
back_off(event_id).await;
return Err(Error::BadServerResponse("Event failed verification."));
}

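Because `validate_and_add_event_id` is now async, its callers (the send_join state and auth-chain loops earlier in this diff) await each mapped result. A condensed sketch of that caller side; `state_pdus` is an illustrative stand-in for the response's PDU list, the rest mirrors the diff:

```rust
// Caller-side sketch: the map now yields futures, so each result is awaited.
for result in state_pdus
    .iter()
    .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
{
    let (event_id, value) = match result.await {
        Ok(t) => t,
        Err(_) => continue,
    };
    // handle (event_id, value) as before
}
```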
@ -1177,7 +1239,7 @@ pub(crate) async fn invite_helper<'a>(
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
|
@ -1202,6 +1264,7 @@ pub(crate) async fn invite_helper<'a>(
|
|||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
room_id,
|
||||
|
@ -1278,17 +1341,14 @@ pub(crate) async fn invite_helper<'a>(
|
|||
.filter(|server| &**server != services().globals.server_name());
|
||||
|
||||
services().sending.send_pdu(servers, &pdu_id)?;
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
} else {
|
||||
if !services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(sender_user, room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this room.",
|
||||
));
|
||||
}
|
||||
|
@ -1298,13 +1358,16 @@ pub(crate) async fn invite_helper<'a>(
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
|
@ -1321,13 +1384,17 @@ pub(crate) async fn invite_helper<'a>(
|
|||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Critical point ends
|
||||
drop(state_lock);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1361,8 +1428,10 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> {
|
|||
|
||||
pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
|
||||
// Ask a remote server if we don't have this room
|
||||
if !services().rooms.metadata.exists(room_id)?
|
||||
&& room_id.server_name() != services().globals.server_name()
|
||||
if !services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.server_in_room(services().globals.server_name(), room_id)?
|
||||
{
|
||||
if let Err(e) = remote_leave_room(user_id, room_id).await {
|
||||
warn!("Failed to leave room {} remotely: {}", user_id, e);
|
||||
|
@ -1393,7 +1462,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
|
@ -1423,24 +1492,33 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
|||
Some(e) => e,
|
||||
};
|
||||
|
||||
let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get())
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
|
||||
let event = RoomMemberEventContent {
|
||||
is_direct: None,
|
||||
membership: MembershipState::Leave,
|
||||
third_party_invite: None,
|
||||
reason,
|
||||
join_authorized_via_users_server: None,
|
||||
..serde_json::from_str(member_event.content.get())
|
||||
.map_err(|_| Error::bad_database("Invalid member event in database."))?
|
||||
};
|
||||
|
||||
event.membership = MembershipState::Leave;
|
||||
event.reason = reason;
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
user_id,
|
||||
room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@@ -1536,7 +1614,7 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> {
let event_id = EventId::parse(format!(
"${}",
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
.expect("ruma can calculate reference hashes")
.expect("Event format validated when event was hashed")
))
.expect("ruma's reference hashes are valid event ids");

@@ -32,7 +32,7 @@ pub async fn send_message_event_route(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(body.room_id.clone())
.or_default(),
);

@@ -43,7 +43,7 @@ pub async fn send_message_event_route(
&& !services().globals.allow_encryption()
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
ErrorKind::forbidden(),
"Encryption has been disabled",
));
}

@@ -73,7 +73,10 @@ pub async fn send_message_event_route(
let mut unsigned = BTreeMap::new();
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());

let event_id = services().rooms.timeline.build_and_append_pdu(
let event_id = services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: body.event_type.to_string().into(),
content: serde_json::from_str(body.body.body.json().get())

@@ -81,11 +84,17 @@ pub async fn send_message_event_route(
unsigned: Some(unsigned),
state_key: None,
redacts: None,
timestamp: if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
},
sender_user,
&body.room_id,
&state_lock,
)?;
)
.await?;

services().transaction_ids.add_txnid(
sender_user,

@@ -124,14 +133,13 @@ pub async fn get_message_events_route(
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(&t).ok());
.and_then(|t| PduCount::try_from_string(t).ok());

services().rooms.lazy_loading.lazy_load_confirm_delivery(
sender_user,
sender_device,
&body.room_id,
from,
)?;
services()
.rooms
.lazy_loading
.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
.await?;

let limit = u64::from(body.limit).min(100) as usize;

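One behavioral addition worth noting in the message and state handlers of this diff: a caller-supplied timestamp is only honored when the request comes from an appservice (timestamp massaging); regular clients always get `None`. The gate, as it appears in the `PduBuilder` fields above; the type annotation is added here only for clarity and is an assumption about the builder's field type:

```rust
// Only appservices may set a custom origin_server_ts; everyone else gets None.
let timestamp: Option<MilliSecondsSinceUnixEpoch> = if body.appservice_info.is_some() {
    body.timestamp
} else {
    None
};
```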
@@ -11,6 +11,7 @@ mod keys;
mod media;
mod membership;
mod message;
mod openid;
mod presence;
mod profile;
mod push;

@@ -32,6 +33,7 @@ mod typing;
mod unversioned;
mod user_directory;
mod voip;
mod well_known;

pub use account::*;
pub use alias::*;

@@ -46,6 +48,7 @@ pub use keys::*;
pub use media::*;
pub use membership::*;
pub use message::*;
pub use openid::*;
pub use presence::*;
pub use profile::*;
pub use push::*;

@@ -67,6 +70,7 @@ pub use typing::*;
pub use unversioned::*;
pub use user_directory::*;
pub use voip::*;
pub use well_known::*;

pub const DEVICE_ID_LENGTH: usize = 10;
pub const TOKEN_LENGTH: usize = 32;

src/api/client_server/openid.rs (new file, 23 additions)

@@ -0,0 +1,23 @@
use std::time::Duration;

use ruma::{api::client::account, authentication::TokenType};

use crate::{services, Result, Ruma};

/// # `POST /_matrix/client/r0/user/{userId}/openid/request_token`
///
/// Request an OpenID token to verify identity with third-party services.
///
/// - The token generated is only valid for the OpenID API.
pub async fn create_openid_token_route(
    body: Ruma<account::request_openid_token::v3::Request>,
) -> Result<account::request_openid_token::v3::Response> {
    let (access_token, expires_in) = services().users.create_openid_token(&body.user_id)?;

    Ok(account::request_openid_token::v3::Response {
        access_token,
        token_type: TokenType::Bearer,
        matrix_server_name: services().globals.server_name().to_owned(),
        expires_in: Duration::from_secs(expires_in),
    })
}
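For a quick sanity check of the new route, a hypothetical client-side call is sketched below; the homeserver URL, user id and access token are placeholders, and `reqwest`, `tokio` and `serde_json` are assumed test dependencies rather than part of this diff.

```rust
// Illustrative only: exercises POST /_matrix/client/r0/user/{userId}/openid/request_token.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let resp: serde_json::Value = reqwest::Client::new()
        .post("https://conduit.example/_matrix/client/r0/user/@alice:conduit.example/openid/request_token")
        .bearer_auth("ACCESS_TOKEN") // placeholder access token
        .json(&serde_json::json!({}))
        .send()
        .await?
        .json()
        .await?;
    // Per the Matrix spec the response carries access_token, token_type ("Bearer"),
    // matrix_server_name and expires_in, matching the Response built above.
    println!("{resp}");
    Ok(())
}
```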
@@ -1,5 +1,8 @@
use crate::{services, utils, Result, Ruma};
use ruma::api::client::presence::{get_presence, set_presence};
use crate::{services, utils, Error, Result, Ruma};
use ruma::api::client::{
error::ErrorKind,
presence::{get_presence, set_presence},
};
use std::time::Duration;

/// # `PUT /_matrix/client/r0/presence/{userId}/status`

@@ -79,6 +82,9 @@ pub async fn get_presence_route(
presence: presence.content.presence,
})
} else {
todo!();
Err(Error::BadRequest(
ErrorKind::NotFound,
"Presence state for this user was not found",
))
}
}
|
|
@ -40,6 +40,7 @@ pub async fn set_displayname_route(
|
|||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
displayname: body.displayname.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
..serde_json::from_str(
|
||||
services()
|
||||
.rooms
|
||||
|
@ -64,6 +65,7 @@ pub async fn set_displayname_route(
|
|||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
room_id,
|
||||
))
|
||||
|
@ -77,18 +79,17 @@ pub async fn set_displayname_route(
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
let _ = services().rooms.timeline.build_and_append_pdu(
|
||||
pdu_builder,
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
);
|
||||
let _ = services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
|
||||
.await;
|
||||
|
||||
// Presence update
|
||||
services().rooms.edus.presence.update_presence(
|
||||
|
@ -175,6 +176,7 @@ pub async fn set_avatar_url_route(
|
|||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
avatar_url: body.avatar_url.clone(),
|
||||
join_authorized_via_users_server: None,
|
||||
..serde_json::from_str(
|
||||
services()
|
||||
.rooms
|
||||
|
@ -199,6 +201,7 @@ pub async fn set_avatar_url_route(
|
|||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
room_id,
|
||||
))
|
||||
|
@ -212,18 +215,17 @@ pub async fn set_avatar_url_route(
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
let _ = services().rooms.timeline.build_and_append_pdu(
|
||||
pdu_builder,
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
);
|
||||
let _ = services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
|
||||
.await;
|
||||
|
||||
// Presence update
|
||||
services().rooms.edus.presence.update_presence(
|
||||
|
|
|
@@ -24,13 +24,16 @@ pub async fn redact_event_route(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(body.room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;

let event_id = services().rooms.timeline.build_and_append_pdu(
let event_id = services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomRedaction,
content: to_raw_value(&RoomRedactionEventContent {

@@ -41,11 +44,13 @@ pub async fn redact_event_route(
unsigned: None,
state_key: None,
redacts: Some(body.event_id.into()),
timestamp: None,
},
sender_user,
&body.room_id,
&state_lock,
)?;
)
.await?;

drop(state_lock);

@@ -3,7 +3,7 @@ use ruma::api::client::relations::{
get_relating_events_with_rel_type_and_event_type,
};

use crate::{service::rooms::timeline::PduCount, services, Result, Ruma};
use crate::{services, Result, Ruma};

/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
pub async fn get_relating_events_with_rel_type_and_event_type_route(

@@ -11,27 +11,6 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");

let from = match body.from.clone() {
Some(from) => PduCount::try_from_string(&from)?,
None => match ruma::api::Direction::Backward {
// TODO: fix ruma so `body.dir` exists
ruma::api::Direction::Forward => PduCount::min(),
ruma::api::Direction::Backward => PduCount::max(),
},
};

let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(&t).ok());

// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);

let res = services()
.rooms
.pdu_metadata

@@ -41,9 +20,11 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
&body.event_id,
Some(body.event_type.clone()),
Some(body.rel_type.clone()),
from,
to,
limit,
body.from.clone(),
body.to.clone(),
body.limit,
body.recurse,
&body.dir,
)?;

Ok(

@@ -51,6 +32,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
chunk: res.chunk,
next_batch: res.next_batch,
prev_batch: res.prev_batch,
recursion_depth: res.recursion_depth,
},
)
}
@ -61,27 +43,6 @@ pub async fn get_relating_events_with_rel_type_route(
|
|||
) -> Result<get_relating_events_with_rel_type::v1::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let from = match body.from.clone() {
|
||||
Some(from) => PduCount::try_from_string(&from)?,
|
||||
None => match ruma::api::Direction::Backward {
|
||||
// TODO: fix ruma so `body.dir` exists
|
||||
ruma::api::Direction::Forward => PduCount::min(),
|
||||
ruma::api::Direction::Backward => PduCount::max(),
|
||||
},
|
||||
};
|
||||
|
||||
let to = body
|
||||
.to
|
||||
.as_ref()
|
||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
||||
|
||||
// Use limit or else 10, with maximum 100
|
||||
let limit = body
|
||||
.limit
|
||||
.and_then(|u| u32::try_from(u).ok())
|
||||
.map_or(10_usize, |u| u as usize)
|
||||
.min(100);
|
||||
|
||||
let res = services()
|
||||
.rooms
|
||||
.pdu_metadata
|
||||
|
@ -91,15 +52,18 @@ pub async fn get_relating_events_with_rel_type_route(
|
|||
&body.event_id,
|
||||
None,
|
||||
Some(body.rel_type.clone()),
|
||||
from,
|
||||
to,
|
||||
limit,
|
||||
body.from.clone(),
|
||||
body.to.clone(),
|
||||
body.limit,
|
||||
body.recurse,
|
||||
&body.dir,
|
||||
)?;
|
||||
|
||||
Ok(get_relating_events_with_rel_type::v1::Response {
|
||||
chunk: res.chunk,
|
||||
next_batch: res.next_batch,
|
||||
prev_batch: res.prev_batch,
|
||||
recursion_depth: res.recursion_depth,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -109,27 +73,6 @@ pub async fn get_relating_events_route(
|
|||
) -> Result<get_relating_events::v1::Response> {
|
||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||
|
||||
let from = match body.from.clone() {
|
||||
Some(from) => PduCount::try_from_string(&from)?,
|
||||
None => match ruma::api::Direction::Backward {
|
||||
// TODO: fix ruma so `body.dir` exists
|
||||
ruma::api::Direction::Forward => PduCount::min(),
|
||||
ruma::api::Direction::Backward => PduCount::max(),
|
||||
},
|
||||
};
|
||||
|
||||
let to = body
|
||||
.to
|
||||
.as_ref()
|
||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
||||
|
||||
// Use limit or else 10, with maximum 100
|
||||
let limit = body
|
||||
.limit
|
||||
.and_then(|u| u32::try_from(u).ok())
|
||||
.map_or(10_usize, |u| u as usize)
|
||||
.min(100);
|
||||
|
||||
services()
|
||||
.rooms
|
||||
.pdu_metadata
|
||||
|
@ -139,8 +82,10 @@ pub async fn get_relating_events_route(
|
|||
&body.event_id,
|
||||
None,
|
||||
None,
|
||||
from,
|
||||
to,
|
||||
limit,
|
||||
body.from.clone(),
|
||||
body.to.clone(),
|
||||
body.limit,
|
||||
body.recurse,
|
||||
&body.dir,
|
||||
)
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ use ruma::{
|
|||
},
|
||||
int,
|
||||
serde::JsonObject,
|
||||
CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId,
|
||||
CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId,
|
||||
};
|
||||
use serde_json::{json, value::to_raw_value};
|
||||
use std::{cmp::max, collections::BTreeMap, sync::Arc};
|
||||
|
@ -61,18 +61,18 @@ pub async fn create_room_route(
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
if !services().globals.allow_room_creation()
|
||||
&& !body.from_appservice
|
||||
&& body.appservice_info.is_none()
|
||||
&& !services().users.is_admin(sender_user)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Room creation has been disabled.",
|
||||
));
|
||||
}
|
||||
|
@ -104,6 +104,22 @@ pub async fn create_room_route(
|
|||
}
|
||||
})?;
|
||||
|
||||
if let Some(ref alias) = alias {
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.aliases.is_match(alias.as_str()) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else if services().appservice.is_exclusive_alias(alias).await {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"Room alias reserved by appservice.",
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let room_version = match body.room_version.clone() {
|
||||
Some(room_version) => {
|
||||
if services()
|
||||
|
@ -127,12 +143,29 @@ pub async fn create_room_route(
|
|||
let mut content = content
|
||||
.deserialize_as::<CanonicalJsonObject>()
|
||||
.expect("Invalid creation content");
|
||||
|
||||
match room_version {
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
| RoomVersionId::V7
|
||||
| RoomVersionId::V8
|
||||
| RoomVersionId::V9
|
||||
| RoomVersionId::V10 => {
|
||||
content.insert(
|
||||
"creator".into(),
|
||||
json!(&sender_user).try_into().map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
|
||||
})?,
|
||||
);
|
||||
}
|
||||
RoomVersionId::V11 => {} // V11 removed the "creator" key
|
||||
_ => unreachable!("Validity of room version already checked"),
|
||||
}
|
||||
|
||||
content.insert(
|
||||
"room_version".into(),
|
||||
json!(room_version.as_str()).try_into().map_err(|_| {
|
||||
|
@ -142,9 +175,22 @@ pub async fn create_room_route(
|
|||
content
|
||||
}
|
||||
None => {
|
||||
// TODO: Add correct value for v11
|
||||
let content = match room_version {
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
| RoomVersionId::V7
|
||||
| RoomVersionId::V8
|
||||
| RoomVersionId::V9
|
||||
| RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()),
|
||||
RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
|
||||
_ => unreachable!("Validity of room version already checked"),
|
||||
};
|
||||
let mut content = serde_json::from_str::<CanonicalJsonObject>(
|
||||
to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone()))
|
||||
to_raw_value(&content)
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
|
||||
.get(),
|
||||
)
|
||||
|
@ -174,21 +220,29 @@ pub async fn create_room_route(
|
|||
}
|
||||
|
||||
// 1. The room create event
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomCreate,
|
||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 2. Let the room creator join
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
|
@ -205,11 +259,13 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 3. Power levels
|
||||
|
||||
|
@ -246,7 +302,10 @@ pub async fn create_room_route(
|
|||
}
|
||||
}
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomPowerLevels,
|
||||
content: to_raw_value(&power_levels_content)
|
||||
|
@ -254,15 +313,20 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 4. Canonical room alias
|
||||
if let Some(room_alias_id) = &alias {
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomCanonicalAlias,
|
||||
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
||||
|
@ -273,17 +337,22 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// 5. Events set by preset
|
||||
|
||||
// 5.1 Join Rules
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomJoinRules,
|
||||
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
||||
|
@ -295,14 +364,19 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 5.2 History Visibility
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomHistoryVisibility,
|
||||
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
||||
|
@ -312,14 +386,19 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 5.3 Guest Access
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomGuestAccess,
|
||||
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
||||
|
@ -330,11 +409,13 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 6. Events listed in initial_state
|
||||
for event in &body.initial_state {
|
||||
|
@ -353,33 +434,40 @@ pub async fn create_room_route(
|
|||
continue;
|
||||
}
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
pdu_builder,
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// 7. Events implied by name and topic
|
||||
if let Some(name) = &body.name {
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomName,
|
||||
content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
|
||||
content: to_raw_value(&RoomNameEventContent::new(name.clone()))
|
||||
.expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
if let Some(topic) = &body.topic {
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomTopic,
|
||||
content: to_raw_value(&RoomTopicEventContent {
|
||||
|
@ -389,11 +477,13 @@ pub async fn create_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// 8. Events implied by invite (and TODO: invite_3pid)
|
||||
|
@ -404,7 +494,10 @@ pub async fn create_room_route(
|
|||
|
||||
// Homeserver specific stuff
|
||||
if let Some(alias) = alias {
|
||||
services().rooms.alias.set_alias(&alias, &room_id)?;
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.set_alias(&alias, &room_id, sender_user)?;
|
||||
}
|
||||
|
||||
if body.visibility == room::Visibility::Public {
|
||||
|
@ -441,7 +534,7 @@ pub async fn get_room_event_route(
|
|||
&body.event_id,
|
||||
)? {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this event.",
|
||||
));
|
||||
}
|
||||
|
@ -470,7 +563,7 @@ pub async fn get_room_aliases_route(
|
|||
.is_joined(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view this room.",
|
||||
));
|
||||
}
|
||||
|
@ -523,7 +616,7 @@ pub async fn upgrade_room_route(
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(body.room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
|
@ -531,7 +624,10 @@ pub async fn upgrade_room_route(
|
|||
|
||||
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
||||
// Fail if the sender does not have the required permissions
|
||||
let tombstone_event_id = services().rooms.timeline.build_and_append_pdu(
|
||||
let tombstone_event_id = services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomTombstone,
|
||||
content: to_raw_value(&RoomTombstoneEventContent {
|
||||
|
@ -542,11 +638,13 @@ pub async fn upgrade_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Change lock to replacement room
|
||||
drop(state_lock);
|
||||
|
@ -555,7 +653,7 @@ pub async fn upgrade_room_route(
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(replacement_room.clone())
|
||||
.or_default(),
|
||||
);
|
||||
|
@ -580,12 +678,30 @@ pub async fn upgrade_room_route(
|
|||
));
|
||||
|
||||
// Send a m.room.create event containing a predecessor field and the applicable room_version
|
||||
match body.new_version {
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
| RoomVersionId::V7
|
||||
| RoomVersionId::V8
|
||||
| RoomVersionId::V9
|
||||
| RoomVersionId::V10 => {
|
||||
create_event_content.insert(
|
||||
"creator".into(),
|
||||
json!(&sender_user)
|
||||
.try_into()
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
|
||||
json!(&sender_user).try_into().map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::BadJson, "Error forming creation event")
|
||||
})?,
|
||||
);
|
||||
}
|
||||
RoomVersionId::V11 => {
|
||||
// "creator" key no longer exists in V11 rooms
|
||||
create_event_content.remove("creator");
|
||||
}
|
||||
_ => unreachable!("Validity of room version already checked"),
|
||||
}
|
||||
create_event_content.insert(
|
||||
"room_version".into(),
|
||||
json!(&body.new_version)
|
||||
|
@ -613,7 +729,10 @@ pub async fn upgrade_room_route(
|
|||
));
|
||||
}
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomCreate,
|
||||
content: to_raw_value(&create_event_content)
|
||||
|
@ -621,14 +740,19 @@ pub async fn upgrade_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&replacement_room,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Join the new room
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
|
@ -645,11 +769,13 @@ pub async fn upgrade_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some(sender_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&replacement_room,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Recommended transferable state events list from the specs
|
||||
let transferable_state_events = vec![
|
||||
|
@ -676,18 +802,23 @@ pub async fn upgrade_room_route(
|
|||
None => continue, // Skipping missing events.
|
||||
};
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: event_type.to_string().into(),
|
||||
content: event_content,
|
||||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&replacement_room,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Moves any local aliases to the new room
|
||||
|
@ -700,7 +831,7 @@ pub async fn upgrade_room_route(
|
|||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.set_alias(&alias, &replacement_room)?;
|
||||
.set_alias(&alias, &replacement_room, sender_user)?;
|
||||
}
|
||||
|
||||
// Get the old room power levels
|
||||
|
@ -721,7 +852,10 @@ pub async fn upgrade_room_route(
|
|||
power_levels_event_content.invite = new_level;
|
||||
|
||||
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
||||
let _ = services().rooms.timeline.build_and_append_pdu(
|
||||
let _ = services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomPowerLevels,
|
||||
content: to_raw_value(&power_levels_event_content)
|
||||
|
@ -729,11 +863,13 @@ pub async fn upgrade_room_route(
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
sender_user,
|
||||
&body.room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
drop(state_lock);
|
||||
|
||||
|
|
|
@@ -43,7 +43,7 @@ pub async fn search_events_route(
.is_joined(sender_user, &room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
ErrorKind::forbidden(),
"You don't have permission to view this room.",
));
}

@@ -89,7 +89,8 @@ pub async fn search_events_route(
.get_pdu_from_id(result)
.ok()?
.filter(|pdu| {
services()
!pdu.is_redacted()
&& services()
.rooms
.state_accessor
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
|
|
@ -42,29 +42,43 @@ pub async fn get_login_types_route(
|
|||
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
||||
/// supported login types.
|
||||
pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
|
||||
// To allow deprecated login methods
|
||||
#![allow(deprecated)]
|
||||
// Validate login method
|
||||
// TODO: Other login methods
|
||||
let user_id = match &body.login_info {
|
||||
login::v3::LoginInfo::Password(login::v3::Password {
|
||||
identifier,
|
||||
password,
|
||||
user,
|
||||
address: _,
|
||||
medium: _,
|
||||
}) => {
|
||||
let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
|
||||
user_id.to_lowercase()
|
||||
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
||||
UserId::parse_with_server_name(
|
||||
user_id.to_lowercase(),
|
||||
services().globals.server_name(),
|
||||
)
|
||||
} else if let Some(user) = user {
|
||||
UserId::parse(user)
|
||||
} else {
|
||||
warn!("Bad login type: {:?}", &body.login_info);
|
||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||
};
|
||||
let user_id =
|
||||
UserId::parse_with_server_name(username, services().globals.server_name())
|
||||
.map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
||||
})?;
|
||||
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
|
||||
}
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
|
||||
|
||||
if services().appservice.is_exclusive_user_id(&user_id).await {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User id reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
let hash = services()
|
||||
.users
|
||||
.password_hash(&user_id)?
|
||||
.ok_or(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Wrong username or password.",
|
||||
))?;
|
||||
|
||||
|
@ -79,7 +93,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||
|
||||
if !hash_matches {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Wrong username or password.",
|
||||
));
|
||||
}
|
||||
|
@ -95,9 +109,20 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||
)
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
|
||||
let username = token.claims.sub.to_lowercase();
|
||||
UserId::parse_with_server_name(username, services().globals.server_name()).map_err(
|
||||
|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
|
||||
)?
|
||||
let user_id =
|
||||
UserId::parse_with_server_name(username, services().globals.server_name())
|
||||
.map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
||||
})?;
|
||||
|
||||
if services().appservice.is_exclusive_user_id(&user_id).await {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User id reserved by appservice.",
|
||||
));
|
||||
}
|
||||
|
||||
user_id
|
||||
} else {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Unknown,
|
||||
|
@ -105,23 +130,37 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||
));
|
||||
}
|
||||
}
|
||||
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { identifier }) => {
|
||||
if !body.from_appservice {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
"Forbidden login type.",
|
||||
));
|
||||
};
|
||||
let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
|
||||
user_id.to_lowercase()
|
||||
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
|
||||
identifier,
|
||||
user,
|
||||
}) => {
|
||||
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
||||
UserId::parse_with_server_name(
|
||||
user_id.to_lowercase(),
|
||||
services().globals.server_name(),
|
||||
)
|
||||
} else if let Some(user) = user {
|
||||
UserId::parse(user)
|
||||
} else {
|
||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||
};
|
||||
let user_id =
|
||||
UserId::parse_with_server_name(username, services().globals.server_name())
|
||||
.map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
||||
})?;
|
||||
warn!("Bad login type: {:?}", &body.login_info);
|
||||
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
|
||||
}
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
|
||||
|
||||
if let Some(ref info) = body.appservice_info {
|
||||
if !info.is_user_match(&user_id) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Exclusive,
|
||||
"User is not in namespace.",
|
||||
));
|
||||
}
|
||||
} else {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::MissingToken,
|
||||
"Missing appservice token.",
|
||||
));
|
||||
}
|
||||
|
||||
user_id
|
||||
}
|
||||
_ => {
|
||||
|
@ -163,6 +202,8 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
|||
|
||||
info!("{} logged in", user_id);
|
||||
|
||||
// Homeservers are still required to send the `home_server` field
|
||||
#[allow(deprecated)]
|
||||
Ok(login::v3::Response {
|
||||
user_id,
|
||||
access_token: token,
|
||||
|
@@ -186,6 +227,15 @@ pub async fn logout_route(body: Ruma<logout::v3::Request>) -> Result<logout::v3:
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

    if let Some(ref info) = body.appservice_info {
        if !info.is_user_match(sender_user) {
            return Err(Error::BadRequest(
                ErrorKind::Exclusive,
                "User is not in namespace.",
            ));
        }
    }

    services().users.remove_device(sender_user, sender_device)?;

    Ok(logout::v3::Response::new())

@@ -207,6 +257,20 @@ pub async fn logout_all_route(
) -> Result<logout_all::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    if let Some(ref info) = body.appservice_info {
        if !info.is_user_match(sender_user) {
            return Err(Error::BadRequest(
                ErrorKind::Exclusive,
                "User is not in namespace.",
            ));
        }
    } else {
        return Err(Error::BadRequest(
            ErrorKind::MissingToken,
            "Missing appservice token.",
        ));
    }

    for device_id in services().users.all_device_ids(sender_user).flatten() {
        services().users.remove_device(sender_user, &device_id)?;
    }
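Both logout routes above now gate the request on the appservice's user namespaces. A minimal, hypothetical stand-in for that `is_user_match` check follows; the real check lives in the appservice service and also distinguishes exclusive from non-exclusive namespaces, which this sketch ignores. It assumes the `regex` crate is available.

// Hypothetical stand-in for `info.is_user_match(user_id)` as used in the
// logout routes above: a user matches when any of the appservice's user
// namespace regexes matches the full user ID.
use regex::Regex;

struct RegistrationInfoStub {
    user_namespaces: Vec<Regex>,
}

impl RegistrationInfoStub {
    fn is_user_match(&self, user_id: &str) -> bool {
        self.user_namespaces.iter().any(|re| re.is_match(user_id))
    }
}

fn main() {
    let info = RegistrationInfoStub {
        user_namespaces: vec![Regex::new(r"^@_bridge_.*:example\.org$").unwrap()],
    };
    assert!(info.is_user_match("@_bridge_alice:example.org"));
    assert!(!info.is_user_match("@bob:example.org"));
}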
@ -10,7 +10,7 @@ use ruma::{
|
|||
room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
|
||||
},
|
||||
serde::Raw,
|
||||
EventId, RoomId, UserId,
|
||||
EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId,
|
||||
};
|
||||
use tracing::log::warn;
|
||||
|
||||
|
@ -32,6 +32,11 @@ pub async fn send_state_event_for_key_route(
|
|||
&body.event_type,
|
||||
&body.body.body, // Yes, I hate it too
|
||||
body.state_key.to_owned(),
|
||||
if body.appservice_info.is_some() {
|
||||
body.timestamp
|
||||
} else {
|
||||
None
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
@ -54,7 +59,7 @@ pub async fn send_state_event_for_empty_key_route(
|
|||
// Forbid m.room.encryption if encryption is disabled
|
||||
if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Encryption has been disabled",
|
||||
));
|
||||
}
|
||||
|
@ -65,6 +70,11 @@ pub async fn send_state_event_for_empty_key_route(
|
|||
&body.event_type.to_string().into(),
|
||||
&body.body.body,
|
||||
body.state_key.to_owned(),
|
||||
if body.appservice_info.is_some() {
|
||||
body.timestamp
|
||||
} else {
|
||||
None
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
@ -85,10 +95,10 @@ pub async fn get_state_events_route(
|
|||
if !services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
||||
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view the room state.",
|
||||
));
|
||||
}
|
||||
|
@ -118,10 +128,10 @@ pub async fn get_state_events_for_key_route(
|
|||
if !services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
||||
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view the room state.",
|
||||
));
|
||||
}
|
||||
|
@ -157,10 +167,10 @@ pub async fn get_state_events_for_empty_key_route(
|
|||
if !services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
||||
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You don't have permission to view the room state.",
|
||||
));
|
||||
}
|
||||
|
@ -190,6 +200,7 @@ async fn send_state_event_for_key_helper(
|
|||
event_type: &StateEventType,
|
||||
json: &Raw<AnyStateEventContent>,
|
||||
state_key: String,
|
||||
timestamp: Option<MilliSecondsSinceUnixEpoch>,
|
||||
) -> Result<Arc<EventId>> {
|
||||
let sender_user = sender;
|
||||
|
||||
|
@ -214,7 +225,7 @@ async fn send_state_event_for_key_helper(
|
|||
.is_none()
|
||||
{
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"You are only allowed to send canonical_alias \
|
||||
events when it's aliases already exists",
|
||||
));
|
||||
|
@ -227,24 +238,29 @@ async fn send_state_event_for_key_helper(
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
let event_id = services().rooms.timeline.build_and_append_pdu(
|
||||
let event_id = services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: event_type.to_string().into(),
|
||||
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
||||
unsigned: None,
|
||||
state_key: Some(state_key),
|
||||
redacts: None,
|
||||
timestamp,
|
||||
},
|
||||
sender_user,
|
||||
room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(event_id)
|
||||
}
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
use crate::{
|
||||
service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse,
|
||||
service::{pdu::EventHash, rooms::timeline::PduCount},
|
||||
services, utils, Error, PduEvent, Result, Ruma, RumaResponse,
|
||||
};
|
||||
|
||||
use ruma::{
|
||||
api::client::{
|
||||
filter::{FilterDefinition, LazyLoadOptions},
|
||||
|
@ -10,7 +12,7 @@ use ruma::{
|
|||
Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom,
|
||||
LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice,
|
||||
},
|
||||
v4::SlidingOp,
|
||||
v4::{SlidingOp, SlidingSyncRoomHero},
|
||||
DeviceLists, UnreadNotificationsCount,
|
||||
},
|
||||
uiaa::UiaaResponse,
|
||||
|
@ -20,7 +22,7 @@ use ruma::{
|
|||
StateEventType, TimelineEventType,
|
||||
},
|
||||
serde::Raw,
|
||||
uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
|
||||
uint, DeviceId, EventId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
|
||||
};
|
||||
use std::{
|
||||
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
|
||||
|
@ -28,7 +30,7 @@ use std::{
|
|||
time::Duration,
|
||||
};
|
||||
use tokio::sync::watch::Sender;
|
||||
use tracing::error;
|
||||
use tracing::{error, info};
|
||||
|
||||
/// # `GET /_matrix/client/r0/sync`
|
||||
///
|
||||
|
@ -75,7 +77,7 @@ pub async fn sync_events_route(
|
|||
.globals
|
||||
.sync_receivers
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry((sender_user.clone(), sender_device.clone()))
|
||||
{
|
||||
Entry::Vacant(v) => {
|
||||
|
@ -98,6 +100,8 @@ pub async fn sync_events_route(
|
|||
|
||||
o.insert((body.since.clone(), rx.clone()));
|
||||
|
||||
info!("Sync started for {sender_user}");
|
||||
|
||||
tokio::spawn(sync_helper_wrapper(
|
||||
sender_user.clone(),
|
||||
sender_device.clone(),
|
||||
|
@ -147,7 +151,7 @@ async fn sync_helper_wrapper(
|
|||
.globals
|
||||
.sync_receivers
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry((sender_user, sender_device))
|
||||
{
|
||||
Entry::Occupied(o) => {
|
||||
|
@ -293,8 +297,6 @@ async fn sync_helper(
|
|||
for result in all_left_rooms {
|
||||
let (room_id, _) = result?;
|
||||
|
||||
let mut left_state_events = Vec::new();
|
||||
|
||||
{
|
||||
// Get and drop the lock to wait for remaining operations to finish
|
||||
let mutex_insert = Arc::clone(
|
||||
|
@ -302,11 +304,11 @@ async fn sync_helper(
|
|||
.globals
|
||||
.roomid_mutex_insert
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let insert_lock = mutex_insert.lock().unwrap();
|
||||
let insert_lock = mutex_insert.lock().await;
|
||||
drop(insert_lock);
|
||||
}
|
||||
|
||||
|
@ -322,9 +324,48 @@ async fn sync_helper(
|
|||
|
||||
if !services().rooms.metadata.exists(&room_id)? {
|
||||
// This is just a rejected invite, not a room we know
|
||||
let event = PduEvent {
|
||||
event_id: EventId::new(services().globals.server_name()).into(),
|
||||
sender: sender_user.clone(),
|
||||
origin_server_ts: utils::millis_since_unix_epoch()
|
||||
.try_into()
|
||||
.expect("Timestamp is valid js_int value"),
|
||||
kind: TimelineEventType::RoomMember,
|
||||
content: serde_json::from_str(r#"{ "membership": "leave"}"#).unwrap(),
|
||||
state_key: Some(sender_user.to_string()),
|
||||
unsigned: None,
|
||||
// The following keys are dropped on conversion
|
||||
room_id: room_id.clone(),
|
||||
prev_events: vec![],
|
||||
depth: uint!(1),
|
||||
auth_events: vec![],
|
||||
redacts: None,
|
||||
hashes: EventHash {
|
||||
sha256: String::new(),
|
||||
},
|
||||
signatures: None,
|
||||
};
|
||||
|
||||
left_rooms.insert(
|
||||
room_id,
|
||||
LeftRoom {
|
||||
account_data: RoomAccountData { events: Vec::new() },
|
||||
timeline: Timeline {
|
||||
limited: false,
|
||||
prev_batch: Some(next_batch_string.clone()),
|
||||
events: Vec::new(),
|
||||
},
|
||||
state: State {
|
||||
events: vec![event.to_sync_state_event()],
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut left_state_events = Vec::new();
|
||||
|
||||
let since_shortstatehash = services()
|
||||
.rooms
|
||||
.user
|
||||
|
@ -434,11 +475,11 @@ async fn sync_helper(
|
|||
.globals
|
||||
.roomid_mutex_insert
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let insert_lock = mutex_insert.lock().unwrap();
|
||||
let insert_lock = mutex_insert.lock().await;
|
||||
drop(insert_lock);
|
||||
}
|
||||
|
||||
|
@ -554,6 +595,7 @@ async fn sync_helper(
|
|||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn load_joined_room(
|
||||
sender_user: &UserId,
|
||||
sender_device: &DeviceId,
|
||||
|
@ -576,11 +618,11 @@ async fn load_joined_room(
|
|||
.globals
|
||||
.roomid_mutex_insert
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
let insert_lock = mutex_insert.lock().unwrap();
|
||||
let insert_lock = mutex_insert.lock().await;
|
||||
drop(insert_lock);
|
||||
}
|
||||
|
||||
|
@ -590,7 +632,7 @@ async fn load_joined_room(
|
|||
|| services()
|
||||
.rooms
|
||||
.user
|
||||
.last_notification_read(&sender_user, &room_id)?
|
||||
.last_notification_read(sender_user, room_id)?
|
||||
> since;
|
||||
|
||||
let mut timeline_users = HashSet::new();
|
||||
|
@ -598,17 +640,16 @@ async fn load_joined_room(
|
|||
timeline_users.insert(event.sender.as_str().to_owned());
|
||||
}
|
||||
|
||||
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
||||
&sender_user,
|
||||
&sender_device,
|
||||
&room_id,
|
||||
sincecount,
|
||||
)?;
|
||||
services()
|
||||
.rooms
|
||||
.lazy_loading
|
||||
.lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount)
|
||||
.await?;
|
||||
|
||||
// Database queries:
|
||||
|
||||
let current_shortstatehash =
|
||||
if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
|
||||
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
|
||||
s
|
||||
} else {
|
||||
error!("Room {} has no state", room_id);
|
||||
|
@ -618,7 +659,7 @@ async fn load_joined_room(
|
|||
let since_shortstatehash = services()
|
||||
.rooms
|
||||
.user
|
||||
.get_token_shortstatehash(&room_id, since)?;
|
||||
.get_token_shortstatehash(room_id, since)?;
|
||||
|
||||
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
|
||||
if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
|
||||
|
@ -630,12 +671,12 @@ async fn load_joined_room(
|
|||
let joined_member_count = services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_joined_count(&room_id)?
|
||||
.room_joined_count(room_id)?
|
||||
.unwrap_or(0);
|
||||
let invited_member_count = services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_invited_count(&room_id)?
|
||||
.room_invited_count(room_id)?
|
||||
.unwrap_or(0);
|
||||
|
||||
// Recalculate heroes (first 5 members)
|
||||
|
@ -648,7 +689,7 @@ async fn load_joined_room(
|
|||
for hero in services()
|
||||
.rooms
|
||||
.timeline
|
||||
.all_pdus(&sender_user, &room_id)?
|
||||
.all_pdus(sender_user, room_id)?
|
||||
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
|
||||
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
|
||||
.map(|(_, pdu)| {
|
||||
|
@ -669,13 +710,13 @@ async fn load_joined_room(
|
|||
) && (services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(&user_id, &room_id)?
|
||||
.is_joined(&user_id, room_id)?
|
||||
|| services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_invited(&user_id, &room_id)?)
|
||||
.is_invited(&user_id, room_id)?)
|
||||
{
|
||||
Ok::<_, Error>(Some(state_key.clone()))
|
||||
Ok::<_, Error>(Some(user_id))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
|
@ -789,20 +830,24 @@ async fn load_joined_room(
|
|||
|
||||
// Reset lazy loading because this is an initial sync
|
||||
services().rooms.lazy_loading.lazy_load_reset(
|
||||
&sender_user,
|
||||
&sender_device,
|
||||
&room_id,
|
||||
sender_user,
|
||||
sender_device,
|
||||
room_id,
|
||||
)?;
|
||||
|
||||
// The state_events above should contain all timeline_users, let's mark them as lazy
|
||||
// loaded.
|
||||
services().rooms.lazy_loading.lazy_load_mark_sent(
|
||||
&sender_user,
|
||||
&sender_device,
|
||||
&room_id,
|
||||
services()
|
||||
.rooms
|
||||
.lazy_loading
|
||||
.lazy_load_mark_sent(
|
||||
sender_user,
|
||||
sender_device,
|
||||
room_id,
|
||||
lazy_loaded,
|
||||
next_batchcount,
|
||||
);
|
||||
)
|
||||
.await;
|
||||
|
||||
(
|
||||
heroes,
|
||||
|
@ -866,14 +911,14 @@ async fn load_joined_room(
|
|||
}
|
||||
|
||||
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||
&sender_user,
|
||||
&sender_device,
|
||||
&room_id,
|
||||
sender_user,
|
||||
sender_device,
|
||||
room_id,
|
||||
&event.sender,
|
||||
)? || lazy_load_send_redundant
|
||||
{
|
||||
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
|
||||
&room_id,
|
||||
room_id,
|
||||
&StateEventType::RoomMember,
|
||||
event.sender.as_str(),
|
||||
)? {
|
||||
|
@ -883,13 +928,17 @@ async fn load_joined_room(
|
|||
}
|
||||
}
|
||||
|
||||
services().rooms.lazy_loading.lazy_load_mark_sent(
|
||||
&sender_user,
|
||||
&sender_device,
|
||||
&room_id,
|
||||
services()
|
||||
.rooms
|
||||
.lazy_loading
|
||||
.lazy_load_mark_sent(
|
||||
sender_user,
|
||||
sender_device,
|
||||
room_id,
|
||||
lazy_loaded,
|
||||
next_batchcount,
|
||||
);
|
||||
)
|
||||
.await;
|
||||
|
||||
let encrypted_room = services()
|
||||
.rooms
|
||||
|
@ -934,7 +983,7 @@ async fn load_joined_room(
|
|||
match new_membership {
|
||||
MembershipState::Join => {
|
||||
// A new user joined an encrypted room
|
||||
if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
|
||||
if !share_encrypted_room(sender_user, &user_id, room_id)? {
|
||||
device_list_updates.insert(user_id);
|
||||
}
|
||||
}
|
||||
|
@ -954,15 +1003,15 @@ async fn load_joined_room(
|
|||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.room_members(room_id)
|
||||
.flatten()
|
||||
.filter(|user_id| {
|
||||
// Don't send key updates from the sender to the sender
|
||||
&sender_user != user_id
|
||||
sender_user != user_id
|
||||
})
|
||||
.filter(|user_id| {
|
||||
// Only send keys if the sender doesn't share an encrypted room with the target already
|
||||
!share_encrypted_room(&sender_user, user_id, &room_id)
|
||||
!share_encrypted_room(sender_user, user_id, room_id)
|
||||
.unwrap_or(false)
|
||||
}),
|
||||
);
|
||||
|
@ -997,7 +1046,7 @@ async fn load_joined_room(
|
|||
services()
|
||||
.rooms
|
||||
.user
|
||||
.notification_count(&sender_user, &room_id)?
|
||||
.notification_count(sender_user, room_id)?
|
||||
.try_into()
|
||||
.expect("notification count can't go that high"),
|
||||
)
|
||||
|
@ -1010,7 +1059,7 @@ async fn load_joined_room(
|
|||
services()
|
||||
.rooms
|
||||
.user
|
||||
.highlight_count(&sender_user, &room_id)?
|
||||
.highlight_count(sender_user, room_id)?
|
||||
.try_into()
|
||||
.expect("highlight count can't go that high"),
|
||||
)
|
||||
|
@ -1039,15 +1088,22 @@ async fn load_joined_room(
|
|||
.rooms
|
||||
.edus
|
||||
.read_receipt
|
||||
.readreceipts_since(&room_id, since)
|
||||
.readreceipts_since(room_id, since)
|
||||
.filter_map(|r| r.ok()) // Filter out buggy events
|
||||
.map(|(_, _, v)| v)
|
||||
.collect();
|
||||
|
||||
if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
|
||||
if services()
|
||||
.rooms
|
||||
.edus
|
||||
.typing
|
||||
.last_typing_update(room_id)
|
||||
.await?
|
||||
> since
|
||||
{
|
||||
edus.push(
|
||||
serde_json::from_str(
|
||||
&serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?)
|
||||
&serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id).await?)
|
||||
.expect("event is valid, we just created it"),
|
||||
)
|
||||
.expect("event is valid, we just created it"),
|
||||
|
@ -1056,7 +1112,7 @@ async fn load_joined_room(
|
|||
|
||||
// Save the state after this sync so we can send the correct state diff next sync
|
||||
services().rooms.user.associate_token_shortstatehash(
|
||||
&room_id,
|
||||
room_id,
|
||||
next_batch,
|
||||
current_shortstatehash,
|
||||
)?;
|
||||
|
@ -1065,7 +1121,7 @@ async fn load_joined_room(
|
|||
account_data: RoomAccountData {
|
||||
events: services()
|
||||
.account_data
|
||||
.changes_since(Some(&room_id), &sender_user, since)?
|
||||
.changes_since(Some(room_id), sender_user, since)?
|
||||
.into_iter()
|
||||
.filter_map(|(_, v)| {
|
||||
serde_json::from_str(v.json().get())
|
||||
|
@ -1110,13 +1166,13 @@ fn load_timeline(
|
|||
if services()
|
||||
.rooms
|
||||
.timeline
|
||||
.last_timeline_count(&sender_user, &room_id)?
|
||||
.last_timeline_count(sender_user, room_id)?
|
||||
> roomsincecount
|
||||
{
|
||||
let mut non_timeline_pdus = services()
|
||||
.rooms
|
||||
.timeline
|
||||
.pdus_until(&sender_user, &room_id, PduCount::max())?
|
||||
.pdus_until(sender_user, room_id, PduCount::max())?
|
||||
.filter_map(|r| {
|
||||
// Filter out buggy events
|
||||
if r.is_err() {
|
||||
|
@ -1172,7 +1228,6 @@ fn share_encrypted_room(
|
|||
pub async fn sync_events_v4_route(
|
||||
body: Ruma<sync_events::v4::Request>,
|
||||
) -> Result<sync_events::v4::Response, RumaResponse<UiaaResponse>> {
|
||||
dbg!(&body.body);
|
||||
let sender_user = body.sender_user.expect("user is authenticated");
|
||||
let sender_device = body.sender_device.expect("user is authenticated");
|
||||
let mut body = body.body;
|
||||
|
@ -1232,7 +1287,7 @@ pub async fn sync_events_v4_route(
|
|||
|
||||
for room_id in &all_joined_rooms {
|
||||
let current_shortstatehash =
|
||||
if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
|
||||
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
|
||||
s
|
||||
} else {
|
||||
error!("Room {} has no state", room_id);
|
||||
|
@ -1242,7 +1297,7 @@ pub async fn sync_events_v4_route(
|
|||
let since_shortstatehash = services()
|
||||
.rooms
|
||||
.user
|
||||
.get_token_shortstatehash(&room_id, globalsince)?;
|
||||
.get_token_shortstatehash(room_id, globalsince)?;
|
||||
|
||||
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
|
||||
.and_then(|shortstatehash| {
|
||||
|
@ -1331,7 +1386,7 @@ pub async fn sync_events_v4_route(
|
|||
if !share_encrypted_room(
|
||||
&sender_user,
|
||||
&user_id,
|
||||
&room_id,
|
||||
room_id,
|
||||
)? {
|
||||
device_list_changes.insert(user_id);
|
||||
}
|
||||
|
@ -1352,7 +1407,7 @@ pub async fn sync_events_v4_route(
|
|||
services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.room_members(room_id)
|
||||
.flatten()
|
||||
.filter(|user_id| {
|
||||
// Don't send key updates from the sender to the sender
|
||||
|
@ -1360,7 +1415,7 @@ pub async fn sync_events_v4_route(
|
|||
})
|
||||
.filter(|user_id| {
|
||||
// Only send keys if the sender doesn't share an encrypted room with the target already
|
||||
!share_encrypted_room(&sender_user, user_id, &room_id)
|
||||
!share_encrypted_room(&sender_user, user_id, room_id)
|
||||
.unwrap_or(false)
|
||||
}),
|
||||
);
|
||||
|
@ -1451,7 +1506,7 @@ pub async fn sync_events_v4_route(
|
|||
}
|
||||
sync_events::v4::SyncOp {
|
||||
op: SlidingOp::Sync,
|
||||
range: Some(r.clone()),
|
||||
range: Some(r),
|
||||
index: None,
|
||||
room_ids,
|
||||
room_id: None,
|
||||
|
@ -1476,6 +1531,9 @@ pub async fn sync_events_v4_route(
|
|||
|
||||
let mut known_subscription_rooms = BTreeSet::new();
|
||||
for (room_id, room) in &body.room_subscriptions {
|
||||
if !services().rooms.metadata.exists(room_id)? {
|
||||
continue;
|
||||
}
|
||||
let todo_room = todo_rooms
|
||||
.entry(room_id.clone())
|
||||
.or_insert((BTreeSet::new(), 0, u64::MAX));
|
||||
|
@ -1514,7 +1572,7 @@ pub async fn sync_events_v4_route(
|
|||
sender_user.clone(),
|
||||
sender_device.clone(),
|
||||
conn_id.clone(),
|
||||
body.room_subscriptions,
|
||||
body.room_subscriptions.clone(),
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -1523,7 +1581,7 @@ pub async fn sync_events_v4_route(
|
|||
let roomsincecount = PduCount::Normal(*roomsince);
|
||||
|
||||
let (timeline_pdus, limited) =
|
||||
load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?;
|
||||
load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?;
|
||||
|
||||
if roomsince != &0 && timeline_pdus.is_empty() {
|
||||
continue;
|
||||
|
@ -1555,63 +1613,62 @@ pub async fn sync_events_v4_route(
|
|||
|
||||
let required_state = required_state_request
|
||||
.iter()
|
||||
.map(|state| {
|
||||
.flat_map(|state| {
|
||||
services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get(&room_id, &state.0, &state.1)
|
||||
})
|
||||
.filter_map(|r| r.ok())
|
||||
.filter_map(|o| o)
|
||||
.room_state_get(room_id, &state.0, &state.1)
|
||||
.ok()
|
||||
.flatten()
|
||||
.map(|state| state.to_sync_state_event())
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Heroes
|
||||
let heroes = services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_members(&room_id)
|
||||
.room_members(room_id)
|
||||
.filter_map(|r| r.ok())
|
||||
.filter(|member| member != &sender_user)
|
||||
.map(|member| {
|
||||
Ok::<_, Error>(
|
||||
.flat_map(|member| {
|
||||
services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_member(&room_id, &member)?
|
||||
.map(|memberevent| {
|
||||
(
|
||||
memberevent
|
||||
.displayname
|
||||
.unwrap_or_else(|| member.to_string()),
|
||||
memberevent.avatar_url,
|
||||
)
|
||||
}),
|
||||
)
|
||||
.get_member(room_id, &member)
|
||||
.ok()
|
||||
.flatten()
|
||||
.map(|memberevent| SlidingSyncRoomHero {
|
||||
user_id: member,
|
||||
name: memberevent.displayname,
|
||||
avatar: memberevent.avatar_url,
|
||||
})
|
||||
})
|
||||
.filter_map(|r| r.ok())
|
||||
.filter_map(|o| o)
|
||||
.take(5)
|
||||
.collect::<Vec<_>>();
|
||||
let name = if heroes.len() > 1 {
|
||||
let last = heroes[0].0.clone();
|
||||
Some(
|
||||
heroes[1..]
|
||||
let name = match &heroes[..] {
|
||||
[] => None,
|
||||
[only] => Some(
|
||||
only.name
|
||||
.clone()
|
||||
.unwrap_or_else(|| only.user_id.to_string()),
|
||||
),
|
||||
[firsts @ .., last] => Some(
|
||||
firsts
|
||||
.iter()
|
||||
.map(|h| h.0.clone())
|
||||
.map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string()))
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
+ " and "
|
||||
+ &last,
|
||||
)
|
||||
} else if heroes.len() == 1 {
|
||||
Some(heroes[0].0.clone())
|
||||
} else {
|
||||
None
|
||||
+ &last
|
||||
.name
|
||||
.clone()
|
||||
.unwrap_or_else(|| last.user_id.to_string()),
|
||||
),
|
||||
};
|
||||
|
||||
let avatar = if heroes.len() == 1 {
|
||||
heroes[0].1.clone()
|
||||
let avatar = if let [only] = &heroes[..] {
|
||||
only.avatar.clone()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
@ -1619,16 +1676,16 @@ pub async fn sync_events_v4_route(
|
|||
rooms.insert(
|
||||
room_id.clone(),
|
||||
sync_events::v4::SlidingSyncRoom {
|
||||
name: services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_name(&room_id)?
|
||||
.or_else(|| name),
|
||||
avatar: services()
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_avatar(&room_id)?
|
||||
.map_or(avatar, |a| a.url),
|
||||
name: services().rooms.state_accessor.get_name(room_id)?.or(name),
|
||||
avatar: if let Some(avatar) = avatar {
|
||||
JsOption::Some(avatar)
|
||||
} else {
|
||||
match services().rooms.state_accessor.get_avatar(room_id)? {
|
||||
JsOption::Some(avatar) => JsOption::from_option(avatar.url),
|
||||
JsOption::Null => JsOption::Null,
|
||||
JsOption::Undefined => JsOption::Undefined,
|
||||
}
|
||||
},
|
||||
initial: Some(roomsince == &0),
|
||||
is_dm: None,
|
||||
invite_state: None,
|
||||
|
@ -1637,7 +1694,7 @@ pub async fn sync_events_v4_route(
|
|||
services()
|
||||
.rooms
|
||||
.user
|
||||
.highlight_count(&sender_user, &room_id)?
|
||||
.highlight_count(&sender_user, room_id)?
|
||||
.try_into()
|
||||
.expect("notification count can't go that high"),
|
||||
),
|
||||
|
@ -1645,7 +1702,7 @@ pub async fn sync_events_v4_route(
|
|||
services()
|
||||
.rooms
|
||||
.user
|
||||
.notification_count(&sender_user, &room_id)?
|
||||
.notification_count(&sender_user, room_id)?
|
||||
.try_into()
|
||||
.expect("notification count can't go that high"),
|
||||
),
|
||||
|
@ -1658,7 +1715,7 @@ pub async fn sync_events_v4_route(
|
|||
(services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_joined_count(&room_id)?
|
||||
.room_joined_count(room_id)?
|
||||
.unwrap_or(0) as u32)
|
||||
.into(),
|
||||
),
|
||||
|
@ -1666,12 +1723,22 @@ pub async fn sync_events_v4_route(
|
|||
(services()
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_invited_count(&room_id)?
|
||||
.room_invited_count(room_id)?
|
||||
.unwrap_or(0) as u32)
|
||||
.into(),
|
||||
),
|
||||
num_live: None, // Count events in timeline greater than global sync counter
|
||||
timestamp: None,
|
||||
heroes: if body
|
||||
.room_subscriptions
|
||||
.get(room_id)
|
||||
.map(|sub| sub.include_heroes.unwrap_or_default())
|
||||
.unwrap_or_default()
|
||||
{
|
||||
Some(heroes)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
@ -1689,7 +1756,7 @@ pub async fn sync_events_v4_route(
|
|||
let _ = tokio::time::timeout(duration, watcher).await;
|
||||
}
|
||||
|
||||
Ok(dbg!(sync_events::v4::Response {
|
||||
Ok(sync_events::v4::Response {
|
||||
initial: globalsince == 0,
|
||||
txn_id: body.txn_id.clone(),
|
||||
pos: next_batch.to_string(),
|
||||
|
@ -1744,5 +1811,5 @@ pub async fn sync_events_v4_route(
|
|||
},
|
||||
},
|
||||
delta_token: None,
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
|
|
@@ -17,23 +17,29 @@ pub async fn create_typing_event_route(
        .is_joined(sender_user, &body.room_id)?
    {
        return Err(Error::BadRequest(
            ErrorKind::Forbidden,
            ErrorKind::forbidden(),
            "You are not in this room.",
        ));
    }

    if let Typing::Yes(duration) = body.state {
        services().rooms.edus.typing.typing_add(
        services()
            .rooms
            .edus
            .typing
            .typing_add(
                sender_user,
                &body.room_id,
                duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
            )?;
            )
            .await?;
    } else {
        services()
            .rooms
            .edus
            .typing
            .typing_remove(sender_user, &body.room_id)?;
            .typing_remove(sender_user, &body.room_id)
            .await?;
    }

    Ok(create_typing_event::v3::Response {})
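For reference on the `typing_add` call above: its third argument is an absolute expiry timestamp in milliseconds, not a duration. A minimal sketch of that computation, using only the standard library in place of the `utils::millis_since_unix_epoch()` helper:

// Illustrative sketch: how the typing expiry above is derived.
use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn typing_timeout_ms(typing_duration: Duration) -> u64 {
    let now_ms = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is after the unix epoch")
        .as_millis() as u64;
    // The typing EDU is considered expired once this absolute timestamp passes.
    typing_duration.as_millis() as u64 + now_ms
}

fn main() {
    // A client reporting 30s of typing yields an expiry 30 seconds from now.
    println!("{}", typing_timeout_ms(Duration::from_secs(30)));
}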
@@ -1,9 +1,8 @@
use std::{collections::BTreeMap, iter::FromIterator};

use axum::{response::IntoResponse, Json};
use ruma::api::client::{discovery::get_supported_versions, error::ErrorKind};
use ruma::api::client::discovery::get_supported_versions;

use crate::{services, Error, Result, Ruma};
use crate::{Result, Ruma};

/// # `GET /_matrix/client/versions`
///
@@ -26,24 +25,13 @@ pub async fn get_supported_versions_route(
            "v1.2".to_owned(),
            "v1.3".to_owned(),
            "v1.4".to_owned(),
            "v1.5".to_owned(),
        ],
        unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
        unstable_features: BTreeMap::from_iter([
            ("org.matrix.e2e_cross_signing".to_owned(), true),
            ("org.matrix.msc3916.stable".to_owned(), true),
        ]),
    };

    Ok(resp)
}

/// # `GET /.well-known/matrix/client`
pub async fn well_known_client_route(
    _body: Ruma<get_supported_versions::Request>,
) -> Result<impl IntoResponse> {
    let client_url = match services().globals.well_known_client() {
        Some(url) => url.clone(),
        None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
    };

    Ok(Json(serde_json::json!({
        "m.homeserver": {"base_url": client_url},
        "org.matrix.msc3575.proxy": {"url": client_url}
    })))
}
@@ -48,6 +48,9 @@ pub async fn search_users_route(
            return None;
        }

        // It's a matching user, but is the sender allowed to see them?
        let mut user_visible = false;

        let user_is_in_public_rooms = services()
            .rooms
            .state_cache
@@ -69,9 +72,8 @@ pub async fn search_users_route(
            });

        if user_is_in_public_rooms {
            return Some(user);
        }

            user_visible = true;
        } else {
            let user_is_in_shared_rooms = services()
                .rooms
                .user
@@ -81,10 +83,15 @@ pub async fn search_users_route(
                .is_some();

            if user_is_in_shared_rooms {
                return Some(user);
                user_visible = true;
            }
        }

        None
        if !user_visible {
            return None;
        }

        Some(user)
    });

    let results = users.by_ref().take(limit).collect();
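The three hunks above replace the early `return Some(user)` exits with a single `user_visible` flag, so the public-room and shared-room checks feed one decision point at the end of the closure. A self-contained sketch of that pattern, with the database lookups replaced by plain booleans (all names here are stand-ins, not the real search_users_route signature):

// Sketch of the visibility-flag pattern used in search_users_route above.
struct User {
    id: String,
}

fn filter_visible(
    // (candidate, is_in_public_rooms, shares_a_room_with_sender)
    candidates: Vec<(User, bool, bool)>,
) -> Vec<User> {
    candidates
        .into_iter()
        .filter_map(|(user, in_public_rooms, shares_room)| {
            // The candidate matched the search term; now decide visibility.
            let mut user_visible = false;
            if in_public_rooms {
                user_visible = true;
            } else if shares_room {
                user_visible = true;
            }
            if !user_visible {
                return None;
            }
            Some(user)
        })
        .collect()
}

fn main() {
    let visible = filter_visible(vec![
        (User { id: "@a:example.org".to_owned() }, true, false),
        (User { id: "@b:example.org".to_owned() }, false, false),
    ]);
    assert_eq!(visible.len(), 1);
    assert_eq!(visible[0].id, "@a:example.org");
}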
src/api/client_server/well_known.rs  (new file, 22 lines)

@@ -0,0 +1,22 @@
use ruma::api::client::discovery::discover_homeserver::{
    self, HomeserverInfo, SlidingSyncProxyInfo,
};

use crate::{services, Result, Ruma};

/// # `GET /.well-known/matrix/client`
///
/// Returns the client server discovery information.
pub async fn well_known_client(
    _body: Ruma<discover_homeserver::Request>,
) -> Result<discover_homeserver::Response> {
    let client_url = services().globals.well_known_client();

    Ok(discover_homeserver::Response {
        homeserver: HomeserverInfo {
            base_url: client_url.clone(),
        },
        identity_server: None,
        sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }),
    })
}
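On the wire, the `discover_homeserver::Response` above serializes to the standard `.well-known/matrix/client` object. A sketch of the expected shape, built by hand so it runs without ruma; it assumes `serde_json` is available, and the sliding-sync key mirrors the `org.matrix.msc3575.proxy` field used by the `well_known_client_route` handler removed a few hunks above.

// Sketch of the JSON body this endpoint is expected to produce.
fn main() {
    let client_url = "https://matrix.example.org"; // stand-in for the configured well-known client URL
    let body = serde_json::json!({
        "m.homeserver": { "base_url": client_url },
        "org.matrix.msc3575.proxy": { "url": client_url },
    });
    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}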
@ -2,59 +2,64 @@ use std::{collections::BTreeMap, iter::FromIterator, str};
|
|||
|
||||
use axum::{
|
||||
async_trait,
|
||||
body::{Full, HttpBody},
|
||||
extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader},
|
||||
headers::{
|
||||
authorization::{Bearer, Credentials},
|
||||
Authorization,
|
||||
},
|
||||
body::Body,
|
||||
extract::{FromRequest, Path},
|
||||
response::{IntoResponse, Response},
|
||||
BoxError, RequestExt, RequestPartsExt,
|
||||
RequestExt, RequestPartsExt,
|
||||
};
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use axum_extra::{
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
typed_header::TypedHeaderRejectionReason,
|
||||
TypedHeader,
|
||||
};
|
||||
use bytes::{BufMut, BytesMut};
|
||||
use http::{Request, StatusCode};
|
||||
use ruma::{
|
||||
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
|
||||
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId,
|
||||
server_util::authorization::XMatrix,
|
||||
CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use tracing::{debug, error, warn};
|
||||
|
||||
use super::{Ruma, RumaResponse};
|
||||
use crate::{services, Error, Result};
|
||||
use crate::{service::appservice::RegistrationInfo, services, Error, Result};
|
||||
|
||||
enum Token {
|
||||
Appservice(Box<RegistrationInfo>),
|
||||
User((OwnedUserId, OwnedDeviceId)),
|
||||
Invalid,
|
||||
None,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T, S, B> FromRequest<S, B> for Ruma<T>
|
||||
impl<T, S> FromRequest<S> for Ruma<T>
|
||||
where
|
||||
T: IncomingRequest,
|
||||
B: HttpBody + Send + 'static,
|
||||
B::Data: Send,
|
||||
B::Error: Into<BoxError>,
|
||||
{
|
||||
type Rejection = Error;
|
||||
|
||||
async fn from_request(req: Request<B>, _state: &S) -> Result<Self, Self::Rejection> {
|
||||
async fn from_request(req: Request<Body>, _state: &S) -> Result<Self, Self::Rejection> {
|
||||
#[derive(Deserialize)]
|
||||
struct QueryParams {
|
||||
access_token: Option<String>,
|
||||
user_id: Option<String>,
|
||||
}
|
||||
|
||||
let (mut parts, mut body) = match req.with_limited_body() {
|
||||
Ok(limited_req) => {
|
||||
let (mut parts, mut body) = {
|
||||
let limited_req = req.with_limited_body();
|
||||
let (parts, body) = limited_req.into_parts();
|
||||
let body = to_bytes(body)
|
||||
let body = axum::body::to_bytes(
|
||||
body,
|
||||
services()
|
||||
.globals
|
||||
.max_request_size()
|
||||
.try_into()
|
||||
.unwrap_or(usize::MAX),
|
||||
)
|
||||
.await
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
||||
(parts, body)
|
||||
}
|
||||
Err(original_req) => {
|
||||
let (parts, body) = original_req.into_parts();
|
||||
let body = to_bytes(body)
|
||||
.await
|
||||
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
||||
(parts, body)
|
||||
}
|
||||
};
|
||||
|
||||
let metadata = T::METADATA;
|
||||
|
@ -78,77 +83,82 @@ where
|
|||
None => query_params.access_token.as_deref(),
|
||||
};
|
||||
|
||||
let token = if let Some(token) = token {
|
||||
if let Some(reg_info) = services().appservice.find_from_token(token).await {
|
||||
Token::Appservice(Box::new(reg_info.clone()))
|
||||
} else if let Some((user_id, device_id)) = services().users.find_from_token(token)? {
|
||||
Token::User((user_id, OwnedDeviceId::from(device_id)))
|
||||
} else {
|
||||
Token::Invalid
|
||||
}
|
||||
} else {
|
||||
Token::None
|
||||
};
|
||||
|
||||
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
|
||||
|
||||
let appservices = services().appservice.all().unwrap();
|
||||
let appservice_registration = appservices.iter().find(|(_id, registration)| {
|
||||
registration
|
||||
.get("as_token")
|
||||
.and_then(|as_token| as_token.as_str())
|
||||
.map_or(false, |as_token| token == Some(as_token))
|
||||
});
|
||||
|
||||
let (sender_user, sender_device, sender_servername, from_appservice) =
|
||||
if let Some((_id, registration)) = appservice_registration {
|
||||
match metadata.authentication {
|
||||
AuthScheme::AccessToken => {
|
||||
let user_id = query_params.user_id.map_or_else(
|
||||
let (sender_user, sender_device, sender_servername, appservice_info) =
|
||||
match (metadata.authentication, token) {
|
||||
(_, Token::Invalid) => {
|
||||
// OpenID endpoint uses a query param with the same name, drop this once query params for user auth are removed from the spec
|
||||
if query_params.access_token.is_some() {
|
||||
(None, None, None, None)
|
||||
} else {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::UnknownToken { soft_logout: false },
|
||||
"Unknown access token.",
|
||||
));
|
||||
}
|
||||
}
|
||||
(AuthScheme::AccessToken, Token::Appservice(info)) => {
|
||||
let user_id = query_params
|
||||
.user_id
|
||||
.map_or_else(
|
||||
|| {
|
||||
UserId::parse_with_server_name(
|
||||
registration
|
||||
.get("sender_localpart")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap(),
|
||||
info.registration.sender_localpart.as_str(),
|
||||
services().globals.server_name(),
|
||||
)
|
||||
.unwrap()
|
||||
},
|
||||
|s| UserId::parse(s).unwrap(),
|
||||
);
|
||||
UserId::parse,
|
||||
)
|
||||
.map_err(|_| {
|
||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
||||
})?;
|
||||
|
||||
if !services().users.exists(&user_id).unwrap() {
|
||||
if !info.is_user_match(&user_id) {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::Exclusive,
|
||||
"User is not in namespace.",
|
||||
));
|
||||
}
|
||||
|
||||
if !services().users.exists(&user_id)? {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::forbidden(),
|
||||
"User does not exist.",
|
||||
));
|
||||
}
|
||||
|
||||
// TODO: Check if appservice is allowed to be that user
|
||||
(Some(user_id), None, None, true)
|
||||
(Some(user_id), None, None, Some(*info))
|
||||
}
|
||||
AuthScheme::ServerSignatures => (None, None, None, true),
|
||||
AuthScheme::None => (None, None, None, true),
|
||||
}
|
||||
} else {
|
||||
match metadata.authentication {
|
||||
AuthScheme::AccessToken => {
|
||||
let token = match token {
|
||||
Some(token) => token,
|
||||
_ => {
|
||||
(
|
||||
AuthScheme::None
|
||||
| AuthScheme::AppserviceToken
|
||||
| AuthScheme::AccessTokenOptional,
|
||||
Token::Appservice(info),
|
||||
) => (None, None, None, Some(*info)),
|
||||
(AuthScheme::AccessToken, Token::None) => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::MissingToken,
|
||||
"Missing access token.",
|
||||
))
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
match services().users.find_from_token(token).unwrap() {
|
||||
None => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::UnknownToken { soft_logout: false },
|
||||
"Unknown access token.",
|
||||
))
|
||||
}
|
||||
Some((user_id, device_id)) => (
|
||||
Some(user_id),
|
||||
Some(OwnedDeviceId::from(device_id)),
|
||||
None,
|
||||
false,
|
||||
),
|
||||
}
|
||||
}
|
||||
AuthScheme::ServerSignatures => {
|
||||
(
|
||||
AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None,
|
||||
Token::User((user_id, device_id)),
|
||||
) => (Some(user_id), Some(device_id), None, None),
|
||||
(AuthScheme::ServerSignatures, Token::None) => {
|
||||
let TypedHeader(Authorization(x_matrix)) = parts
|
||||
.extract::<TypedHeader<Authorization<XMatrix>>>()
|
||||
.await
|
||||
|
@ -165,17 +175,31 @@ where
|
|||
_ => "Unknown header-related error",
|
||||
};
|
||||
|
||||
Error::BadRequest(ErrorKind::Forbidden, msg)
|
||||
Error::BadRequest(ErrorKind::forbidden(), msg)
|
||||
})?;
|
||||
|
||||
if let Some(dest) = x_matrix.destination {
|
||||
if dest != services().globals.server_name() {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Unauthorized,
|
||||
"X-Matrix destination field does not match server name.",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let origin_signatures = BTreeMap::from_iter([(
|
||||
x_matrix.key.clone(),
|
||||
CanonicalJsonValue::String(x_matrix.sig),
|
||||
CanonicalJsonValue::String(x_matrix.sig.to_string()),
|
||||
)]);
|
||||
|
||||
let signatures = BTreeMap::from_iter([(
|
||||
x_matrix.origin.as_str().to_owned(),
|
||||
CanonicalJsonValue::Object(origin_signatures),
|
||||
CanonicalJsonValue::Object(
|
||||
origin_signatures
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v))
|
||||
.collect(),
|
||||
),
|
||||
)]);
|
||||
|
||||
let mut request_map = BTreeMap::from_iter([
|
||||
|
@ -210,7 +234,7 @@ where
|
|||
let keys_result = services()
|
||||
.rooms
|
||||
.event_handler
|
||||
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
|
||||
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false)
|
||||
.await;
|
||||
|
||||
let keys = match keys_result {
|
||||
|
@ -218,17 +242,28 @@ where
|
|||
Err(e) => {
|
||||
warn!("Failed to fetch signing keys: {}", e);
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Failed to fetch signing keys.",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let pub_key_map =
|
||||
BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
|
||||
// Only verify_keys that are currently valid should be used for validating requests
|
||||
// as per MSC4029
|
||||
let pub_key_map = BTreeMap::from_iter([(
|
||||
x_matrix.origin.as_str().to_owned(),
|
||||
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
|
||||
keys.verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id, key.key))
|
||||
.collect()
|
||||
} else {
|
||||
BTreeMap::new()
|
||||
},
|
||||
)]);
|
||||
|
||||
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
|
||||
Ok(()) => (None, None, Some(x_matrix.origin), false),
|
||||
Ok(()) => (None, None, Some(x_matrix.origin), None),
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Failed to verify json request from {}: {}\n{:?}",
|
||||
|
@ -244,17 +279,33 @@ where
|
|||
}
|
||||
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Forbidden,
|
||||
ErrorKind::forbidden(),
|
||||
"Failed to verify X-Matrix signatures.",
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
AuthScheme::None => (None, None, None, false),
|
||||
(
|
||||
AuthScheme::None
|
||||
| AuthScheme::AppserviceToken
|
||||
| AuthScheme::AccessTokenOptional,
|
||||
Token::None,
|
||||
) => (None, None, None, None),
|
||||
(AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Unauthorized,
|
||||
"Only server signatures should be used on this endpoint.",
|
||||
));
|
||||
}
|
||||
(AuthScheme::AppserviceToken, Token::User(_)) => {
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::Unauthorized,
|
||||
"Only appservice access tokens should be used on this endpoint.",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let mut http_request = http::Request::builder().uri(parts.uri).method(parts.method);
|
||||
let mut http_request = Request::builder().uri(parts.uri).method(parts.method);
|
||||
*http_request.headers_mut().unwrap() = parts.headers;
|
||||
|
||||
if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
|
||||
|
@ -302,126 +353,17 @@ where
|
|||
sender_user,
|
||||
sender_device,
|
||||
sender_servername,
|
||||
from_appservice,
|
||||
appservice_info,
|
||||
json_body,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct XMatrix {
|
||||
origin: OwnedServerName,
|
||||
key: String, // KeyName?
|
||||
sig: String,
|
||||
}
|
||||
|
||||
impl Credentials for XMatrix {
|
||||
const SCHEME: &'static str = "X-Matrix";
|
||||
|
||||
fn decode(value: &http::HeaderValue) -> Option<Self> {
|
||||
debug_assert!(
|
||||
value.as_bytes().starts_with(b"X-Matrix "),
|
||||
"HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}",
|
||||
);
|
||||
|
||||
let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
|
||||
.ok()?
|
||||
.trim_start();
|
||||
|
||||
let mut origin = None;
|
||||
let mut key = None;
|
||||
let mut sig = None;
|
||||
|
||||
for entry in parameters.split_terminator(',') {
|
||||
let (name, value) = entry.split_once('=')?;
|
||||
|
||||
// It's not at all clear why some fields are quoted and others not in the spec,
|
||||
// let's simply accept either form for every field.
|
||||
let value = value
|
||||
.strip_prefix('"')
|
||||
.and_then(|rest| rest.strip_suffix('"'))
|
||||
.unwrap_or(value);
|
||||
|
||||
// FIXME: Catch multiple fields of the same name
|
||||
match name {
|
||||
"origin" => origin = Some(value.try_into().ok()?),
|
||||
"key" => key = Some(value.to_owned()),
|
||||
"sig" => sig = Some(value.to_owned()),
|
||||
_ => debug!(
|
||||
"Unexpected field `{}` in X-Matrix Authorization header",
|
||||
name
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
Some(Self {
|
||||
origin: origin?,
|
||||
key: key?,
|
||||
sig: sig?,
|
||||
})
|
||||
}
|
||||
|
||||
fn encode(&self) -> http::HeaderValue {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
|
||||
fn into_response(self) -> Response {
|
||||
match self.0.try_into_http_response::<BytesMut>() {
|
||||
Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
|
||||
Ok(res) => res.map(BytesMut::freeze).map(Body::from).into_response(),
|
||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// copied from hyper under the following license:
|
||||
// Copyright (c) 2014-2021 Sean McArthur
|
||||
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
pub(crate) async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
|
||||
where
|
||||
T: HttpBody,
|
||||
{
|
||||
futures_util::pin_mut!(body);
|
||||
|
||||
// If there's only 1 chunk, we can just return Buf::to_bytes()
|
||||
let mut first = if let Some(buf) = body.data().await {
|
||||
buf?
|
||||
} else {
|
||||
return Ok(Bytes::new());
|
||||
};
|
||||
|
||||
let second = if let Some(buf) = body.data().await {
|
||||
buf?
|
||||
} else {
|
||||
return Ok(first.copy_to_bytes(first.remaining()));
|
||||
};
|
||||
|
||||
// With more than 1 buf, we gotta flatten into a Vec first.
|
||||
let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
|
||||
let mut vec = Vec::with_capacity(cap);
|
||||
vec.put(first);
|
||||
vec.put(second);
|
||||
|
||||
while let Some(buf) = body.data().await {
|
||||
vec.put(buf?);
|
||||
}
|
||||
|
||||
Ok(vec.into())
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use crate::Error;
|
||||
use crate::{service::appservice::RegistrationInfo, Error};
|
||||
use ruma::{
|
||||
api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName,
|
||||
OwnedUserId,
|
||||
|
@ -16,7 +16,7 @@ pub struct Ruma<T> {
|
|||
pub sender_servername: Option<OwnedServerName>,
|
||||
// This is None when body is not a valid string
|
||||
pub json_body: Option<CanonicalJsonValue>,
|
||||
pub from_appservice: bool,
|
||||
pub appservice_info: Option<RegistrationInfo>,
|
||||
}
|
||||
|
||||
impl<T> Deref for Ruma<T> {
|
||||
|
|
(File diff suppressed because it is too large.)

src/clap.rs  (new file, 27 lines)
@@ -0,0 +1,27 @@
//! Integration with `clap`

use clap::Parser;

/// Returns the current version of the crate with extra info if supplied
///
/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to
/// include it in parentheses after the SemVer version. A common value is the
/// git commit hash.
fn version() -> String {
    let cargo_pkg_version = env!("CARGO_PKG_VERSION");

    match option_env!("CONDUIT_VERSION_EXTRA") {
        Some(x) => format!("{} ({})", cargo_pkg_version, x),
        None => cargo_pkg_version.to_owned(),
    }
}

/// Command line arguments
#[derive(Parser)]
#[clap(about, version = version())]
pub struct Args {}

/// Parse command line arguments into structured data
pub fn parse() -> Args {
    Args::parse()
}
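The `version()` helper above reads `CONDUIT_VERSION_EXTRA` at compile time via `option_env!`. A small sketch of the same composition that reads the variable at runtime instead, so the behaviour is easy to try without rebuilding (the runtime lookup is the only liberty taken here):

// Runtime stand-in for the compile-time version() helper in src/clap.rs above.
fn version(cargo_pkg_version: &str) -> String {
    match std::env::var("CONDUIT_VERSION_EXTRA") {
        Ok(extra) if !extra.is_empty() => format!("{} ({})", cargo_pkg_version, extra),
        _ => cargo_pkg_version.to_owned(),
    }
}

fn main() {
    // e.g. CONDUIT_VERSION_EXTRA=abc1234 prints "<crate version> (abc1234)"
    println!("{}", version(env!("CARGO_PKG_VERSION")));
}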
@@ -7,6 +7,7 @@ use std::{
use ruma::{OwnedServerName, RoomVersionId};
use serde::{de::IgnoredAny, Deserialize};
use tracing::warn;
use url::Url;

mod proxy;

@@ -21,7 +22,6 @@ pub struct Config {
    pub tls: Option<TlsConfig>,

    pub server_name: OwnedServerName,
    #[serde(default = "default_database_backend")]
    pub database_backend: String,
    pub database_path: String,
    #[serde(default = "default_db_cache_capacity_mb")]
@@ -47,6 +47,8 @@ pub struct Config {
    #[serde(default = "false_fn")]
    pub allow_registration: bool,
    pub registration_token: Option<String>,
    #[serde(default = "default_openid_token_ttl")]
    pub openid_token_ttl: u64,
    #[serde(default = "true_fn")]
    pub allow_encryption: bool,
    #[serde(default = "false_fn")]
@@ -57,7 +59,8 @@ pub struct Config {
    pub allow_unstable_room_versions: bool,
    #[serde(default = "default_default_room_version")]
    pub default_room_version: RoomVersionId,
    pub well_known_client: Option<String>,
    #[serde(default, flatten)]
    pub well_known: WellKnownConfig,
    #[serde(default = "false_fn")]
    pub allow_jaeger: bool,
    #[serde(default = "false_fn")]
@@ -92,6 +95,14 @@ pub struct TlsConfig {
    pub key: String,
}

#[derive(Clone, Debug, Deserialize, Default)]
pub struct WellKnownConfig {
    #[serde(rename = "well_known_client")]
    pub client: Option<Url>,
    #[serde(rename = "well_known_server")]
    pub server: Option<OwnedServerName>,
}

const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];

impl Config {
@@ -112,9 +123,35 @@ impl Config {
    }
}

impl Config {
    pub fn well_known_client(&self) -> String {
        if let Some(url) = &self.well_known.client {
            url.to_string()
        } else {
            format!("https://{}", self.server_name)
        }
    }

    pub fn well_known_server(&self) -> OwnedServerName {
        match &self.well_known.server {
            Some(server_name) => server_name.to_owned(),
            None => {
                if self.server_name.port().is_some() {
                    self.server_name.to_owned()
                } else {
                    format!("{}:443", self.server_name.host())
                        .try_into()
                        .expect("Host from valid hostname + :443 must be valid")
                }
            }
        }
    }
}

impl fmt::Display for Config {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Prepare a list of config values to show
        let well_known_server = self.well_known_server();
        let lines = [
            ("Server name", self.server_name.host()),
            ("Database backend", &self.database_backend),
@@ -195,6 +232,8 @@ impl fmt::Display for Config {
                }
                &lst.join(", ")
            }),
            ("Well-known server name", well_known_server.as_str()),
            ("Well-known client URL", &self.well_known_client()),
        ];

        let mut msg: String = "Active config values:\n\n".to_owned();
@@ -223,10 +262,6 @@ fn default_port() -> u16 {
    8000
}

fn default_database_backend() -> String {
    "sqlite".to_owned()
}

fn default_db_cache_capacity_mb() -> f64 {
    300.0
}
@@ -264,14 +299,18 @@ fn default_trusted_servers() -> Vec<OwnedServerName> {
}

fn default_log() -> String {
    "warn,state_res=warn,_=off,sled=off".to_owned()
    "warn,state_res=warn,_=off".to_owned()
}

fn default_turn_ttl() -> u64 {
    60 * 60 * 24
}

fn default_openid_token_ttl() -> u64 {
    60 * 60
}

// I know, it's a great name
pub fn default_default_room_version() -> RoomVersionId {
    RoomVersionId::V9
    RoomVersionId::V10
}
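The new `well_known_client` and `well_known_server` helpers above fall back to `https://<server_name>` and to appending `:443` when no explicit port is configured. A sketch of those two decisions over plain strings; ruma's `OwnedServerName` parsing is elided and the port check is simplified to a `:` lookup, so treat this as illustrative only.

// Sketch of the well-known fallback rules implemented in the Config impl above.
fn well_known_client(configured: Option<&str>, server_name: &str) -> String {
    match configured {
        Some(url) => url.to_owned(),
        None => format!("https://{}", server_name),
    }
}

fn well_known_server(configured: Option<&str>, server_name: &str) -> String {
    match configured {
        Some(name) => name.to_owned(),
        // Delegated federation defaults to port 443 unless the server name
        // already carries an explicit port.
        None if server_name.contains(':') => server_name.to_owned(),
        None => format!("{}:443", server_name),
    }
}

fn main() {
    assert_eq!(well_known_client(None, "example.org"), "https://example.org");
    assert_eq!(well_known_server(None, "example.org"), "example.org:443");
    assert_eq!(well_known_server(None, "example.org:8448"), "example.org:8448");
}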
@@ -29,7 +29,9 @@ use crate::Result;
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
#[derive(Default)]
pub enum ProxyConfig {
    #[default]
    None,
    Global {
        #[serde(deserialize_with = "crate::utils::deserialize_from_str")]
@@ -48,11 +50,6 @@ impl ProxyConfig {
        })
    }
}
impl Default for ProxyConfig {
    fn default() -> Self {
        ProxyConfig::None
    }
}

#[derive(Clone, Debug, Deserialize)]
pub struct PartialProxyConfig {
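The proxy config hunk above swaps a hand-written `impl Default` for `#[derive(Default)]` plus a `#[default]` attribute on the variant. A minimal standalone illustration of that pattern (no serde involved; the variant names here are stand-ins):

// Minimal illustration of derive(Default) with #[default] on an enum variant.
#[derive(Debug, Default, PartialEq)]
enum ProxyMode {
    #[default]
    None,
    Global,
}

fn main() {
    // The derived impl picks the #[default] variant, matching the removed
    // manual `impl Default for ProxyConfig`.
    assert_eq!(ProxyMode::default(), ProxyMode::None);
    let _ = ProxyMode::Global; // silence the unused-variant lint in this sketch
}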
@@ -38,7 +38,6 @@ pub trait KeyValueDatabaseEngine: Send + Sync {
    fn memory_usage(&self) -> Result<String> {
        Ok("Current database engine does not support memory usage reporting.".to_owned())
    }
    fn clear_caches(&self) {}
}

pub trait KvTree: Send + Sync {
@@ -116,7 +116,7 @@ impl KvTree for PersyTree {
        match iter {
            Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
                v.into_iter()
                    .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
                    .map(|val| ((*k).to_owned(), (*val).to_owned()))
                    .next()
            })),
            Err(e) => {
@@ -142,7 +142,7 @@ impl KvTree for PersyTree {
            Ok(iter) => {
                let map = iter.filter_map(|(k, v)| {
                    v.into_iter()
                        .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
                        .map(|val| ((*k).to_owned(), (*val).to_owned()))
                        .next()
                });
                if backwards {
@@ -179,7 +179,7 @@ impl KvTree for PersyTree {
            iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
                .filter_map(|(k, v)| {
                    v.into_iter()
                        .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
                        .map(|val| ((*k).to_owned(), (*val).to_owned()))
                        .next()
                }),
        )
@@ -23,32 +23,29 @@ pub struct RocksDbEngineTree<'a> {
fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options {
let mut block_based_options = rocksdb::BlockBasedOptions::default();
block_based_options.set_block_cache(rocksdb_cache);

// "Difference of spinning disk"
// https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html
block_based_options.set_bloom_filter(10.0, false);
block_based_options.set_block_size(4 * 1024);
block_based_options.set_cache_index_and_filter_blocks(true);
block_based_options.set_pin_l0_filter_and_index_blocks_in_cache(true);
block_based_options.set_optimize_filters_for_memory(true);

let mut db_opts = rocksdb::Options::default();
db_opts.set_block_based_table_factory(&block_based_options);
db_opts.set_optimize_filters_for_hits(true);
db_opts.set_skip_stats_update_on_db_open(true);
db_opts.set_level_compaction_dynamic_level_bytes(true);
db_opts.set_target_file_size_base(256 * 1024 * 1024);
//db_opts.set_compaction_readahead_size(2 * 1024 * 1024);
//db_opts.set_use_direct_reads(true);
//db_opts.set_use_direct_io_for_flush_and_compaction(true);
db_opts.create_if_missing(true);
db_opts.increase_parallelism(num_cpus::get() as i32);
db_opts.set_max_open_files(max_open_files);
db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd);
db_opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
db_opts.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd);
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
db_opts.optimize_level_style_compaction(10 * 1024 * 1024);

// https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
db_opts.set_level_compaction_dynamic_level_bytes(true);
db_opts.set_max_background_jobs(6);
db_opts.set_bytes_per_sync(1048576);

// https://github.com/facebook/rocksdb/issues/849
db_opts.set_keep_log_file_num(100);

// https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords
//
// Unclean shutdowns of a Matrix homeserver are likely to be fine when

@@ -56,9 +53,6 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
// restored via federation.
db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords);

let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
db_opts.set_prefix_extractor(prefix_extractor);

db_opts
}

@@ -132,8 +126,6 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0,
))
}

fn clear_caches(&self) {}
}

impl RocksDbEngineTree<'_> {

@@ -144,12 +136,17 @@ impl RocksDbEngineTree<'_> {

impl KvTree for RocksDbEngineTree<'_> {
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
Ok(self.db.rocks.get_cf(&self.cf(), key)?)
let readoptions = rocksdb::ReadOptions::default();

Ok(self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?)
}

fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
let writeoptions = rocksdb::WriteOptions::default();
let lock = self.write_lock.read().unwrap();
self.db.rocks.put_cf(&self.cf(), key, value)?;
self.db
.rocks
.put_cf_opt(&self.cf(), key, value, &writeoptions)?;
drop(lock);

self.watchers.wake(key);

@@ -158,22 +155,31 @@ impl KvTree for RocksDbEngineTree<'_> {
}

fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
let writeoptions = rocksdb::WriteOptions::default();
for (key, value) in iter {
self.db.rocks.put_cf(&self.cf(), key, value)?;
self.db
.rocks
.put_cf_opt(&self.cf(), key, value, &writeoptions)?;
}

Ok(())
}

fn remove(&self, key: &[u8]) -> Result<()> {
Ok(self.db.rocks.delete_cf(&self.cf(), key)?)
let writeoptions = rocksdb::WriteOptions::default();
Ok(self
.db
.rocks
.delete_cf_opt(&self.cf(), key, &writeoptions)?)
}

fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
let readoptions = rocksdb::ReadOptions::default();

Box::new(
self.db
.rocks
.iterator_cf(&self.cf(), rocksdb::IteratorMode::Start)
.iterator_cf_opt(&self.cf(), readoptions, rocksdb::IteratorMode::Start)
.map(|r| r.unwrap())
.map(|(k, v)| (Vec::from(k), Vec::from(v))),
)

@@ -184,11 +190,14 @@ impl KvTree for RocksDbEngineTree<'_> {
from: &[u8],
backwards: bool,
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
let readoptions = rocksdb::ReadOptions::default();

Box::new(
self.db
.rocks
.iterator_cf(
.iterator_cf_opt(
&self.cf(),
readoptions,
rocksdb::IteratorMode::From(
from,
if backwards {

@@ -204,23 +213,33 @@ impl KvTree for RocksDbEngineTree<'_> {
}

fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
let readoptions = rocksdb::ReadOptions::default();
let writeoptions = rocksdb::WriteOptions::default();

let lock = self.write_lock.write().unwrap();

let old = self.db.rocks.get_cf(&self.cf(), key)?;
let old = self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?;
let new = utils::increment(old.as_deref()).unwrap();
self.db.rocks.put_cf(&self.cf(), key, &new)?;
self.db
.rocks
.put_cf_opt(&self.cf(), key, &new, &writeoptions)?;

drop(lock);
Ok(new)
}

fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
let readoptions = rocksdb::ReadOptions::default();
let writeoptions = rocksdb::WriteOptions::default();

let lock = self.write_lock.write().unwrap();

for key in iter {
let old = self.db.rocks.get_cf(&self.cf(), &key)?;
let old = self.db.rocks.get_cf_opt(&self.cf(), &key, &readoptions)?;
let new = utils::increment(old.as_deref()).unwrap();
self.db.rocks.put_cf(&self.cf(), key, new)?;
self.db
.rocks
.put_cf_opt(&self.cf(), key, new, &writeoptions)?;
}

drop(lock);

@@ -232,11 +251,14 @@ impl KvTree for RocksDbEngineTree<'_> {
&'a self,
prefix: Vec<u8>,
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
let readoptions = rocksdb::ReadOptions::default();

Box::new(
self.db
.rocks
.iterator_cf(
.iterator_cf_opt(
&self.cf(),
readoptions,
rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
)
.map(|r| r.unwrap())
@@ -13,8 +13,8 @@ use thread_local::ThreadLocal;
use tracing::debug;

thread_local! {
static READ_CONNECTION: RefCell<Option<&'static Connection>> = RefCell::new(None);
static READ_CONNECTION_ITERATOR: RefCell<Option<&'static Connection>> = RefCell::new(None);
static READ_CONNECTION: RefCell<Option<&'static Connection>> = const { RefCell::new(None) };
static READ_CONNECTION_ITERATOR: RefCell<Option<&'static Connection>> = const { RefCell::new(None) };
}

struct PreparedStatementIterator<'a> {

@@ -33,7 +33,7 @@ impl Iterator for PreparedStatementIterator<'_> {
struct NonAliasingBox<T>(*mut T);
impl<T> Drop for NonAliasingBox<T> {
fn drop(&mut self) {
unsafe { Box::from_raw(self.0) };
drop(unsafe { Box::from_raw(self.0) });
}
}
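Note: the change above switches the SQLite read connections to const-initialized thread locals. A minimal sketch of that pattern (the `COUNTER` name is illustrative, not from this codebase): wrapping the initializer in `const { ... }` lets the thread local be set up at compile time, so accesses skip the runtime lazy-initialization check.

```rust
use std::cell::RefCell;

thread_local! {
    // Const-initialized thread local: no lazy-init flag is checked on each access.
    static COUNTER: RefCell<u64> = const { RefCell::new(0) };
}

fn main() {
    COUNTER.with(|c| *c.borrow_mut() += 1);
    COUNTER.with(|c| assert_eq!(*c.borrow(), 1));
}
```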
@@ -8,6 +8,7 @@ use tokio::sync::watch;

#[derive(Default)]
pub(super) struct Watchers {
#[allow(clippy::type_complexity)]
watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
}

@@ -19,7 +20,7 @@ impl Watchers {
let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) {
hash_map::Entry::Occupied(o) => o.get().1.clone(),
hash_map::Entry::Vacant(v) => {
let (tx, rx) = tokio::sync::watch::channel(());
let (tx, rx) = watch::channel(());
v.insert((tx, rx.clone()));
rx
}
@@ -123,13 +123,12 @@ impl service::account_data::Data for KeyValueDatabase {
.take_while(move |(k, _)| k.starts_with(&prefix))
.map(|(k, v)| {
Ok::<_, Error>((
RoomAccountDataEventType::try_from(
RoomAccountDataEventType::from(
utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
|| Error::bad_database("RoomUserData ID in db is invalid."),
)?)
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
)
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
),
serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
Error::bad_database("Database contains invalid account data.")
})?,
@@ -1,18 +1,15 @@
use ruma::api::appservice::Registration;

use crate::{database::KeyValueDatabase, service, utils, Error, Result};

impl service::appservice::Data for KeyValueDatabase {
/// Registers an appservice and returns the ID to the caller
fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
// TODO: Rumaify
let id = yaml.get("id").unwrap().as_str().unwrap();
fn register_appservice(&self, yaml: Registration) -> Result<String> {
let id = yaml.id.as_str();
self.id_appserviceregistrations.insert(
id.as_bytes(),
serde_yaml::to_string(&yaml).unwrap().as_bytes(),
)?;
self.cached_registrations
.write()
.unwrap()
.insert(id.to_owned(), yaml.to_owned());

Ok(id.to_owned())
}

@@ -25,33 +22,18 @@ impl service::appservice::Data for KeyValueDatabase {
fn unregister_appservice(&self, service_name: &str) -> Result<()> {
self.id_appserviceregistrations
.remove(service_name.as_bytes())?;
self.cached_registrations
.write()
.unwrap()
.remove(service_name);
Ok(())
}

fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
self.cached_registrations
.read()
.unwrap()
.get(id)
.map_or_else(
|| {
fn get_registration(&self, id: &str) -> Result<Option<Registration>> {
self.id_appserviceregistrations
.get(id.as_bytes())?
.map(|bytes| {
serde_yaml::from_slice(&bytes).map_err(|_| {
Error::bad_database(
"Invalid registration bytes in id_appserviceregistrations.",
)
Error::bad_database("Invalid registration bytes in id_appserviceregistrations.")
})
})
.transpose()
},
|r| Ok(Some(r.clone())),
)
}

fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>> {

@@ -64,7 +46,7 @@ impl service::appservice::Data for KeyValueDatabase {
)))
}

fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
fn all(&self) -> Result<Vec<(String, Registration)>> {
self.iter_ids()?
.filter_map(|id| id.ok())
.map(move |id| {
@@ -1,15 +1,19 @@
use std::collections::{BTreeMap, HashMap};
use std::collections::HashMap;

use async_trait::async_trait;
use futures_util::{stream::FuturesUnordered, StreamExt};
use lru_cache::LruCache;
use ruma::{
api::federation::discovery::{ServerSigningKeys, VerifyKey},
api::federation::discovery::{OldVerifyKey, ServerSigningKeys},
signatures::Ed25519KeyPair,
DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
DeviceId, ServerName, UserId,
};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
use crate::{
database::KeyValueDatabase,
service::{self, globals::SigningKeys},
services, utils, Error, Result,
};

pub const COUNTER: &[u8] = b"c";
pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";

@@ -94,7 +98,9 @@ impl service::globals::Data for KeyValueDatabase {
futures.push(self.pduid_pdu.watch_prefix(&short_roomid));

// EDUs
futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes));
futures.push(Box::into_pin(Box::new(async move {
let _result = services().rooms.edus.typing.wait_for_update(&room_id).await;
})));

futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));

@@ -235,64 +241,97 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
self.global.remove(b"keypair")
}

fn add_signing_key(
fn add_signing_key_from_trusted_server(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
// Not atomic, but this is not critical
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;

let mut keys = signingkeys
.and_then(|keys| serde_json::from_slice(&keys).ok())
.unwrap_or_else(|| {
// Just insert "now", it doesn't matter
ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
});
) -> Result<SigningKeys> {
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;

Ok(
if let Some(mut prev_keys) =
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
{
let ServerSigningKeys {
verify_keys,
old_verify_keys,
..
} = new_keys;

keys.verify_keys.extend(verify_keys.into_iter());
keys.old_verify_keys.extend(old_verify_keys.into_iter());
prev_keys.verify_keys.extend(verify_keys);
prev_keys.old_verify_keys.extend(old_verify_keys);
prev_keys.valid_until_ts = new_keys.valid_until_ts;

self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
)?;

let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
prev_keys.into()
} else {
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
)?;

Ok(tree)
new_keys.into()
},
)
}

fn add_signing_key_from_origin(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys> {
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;

Ok(
if let Some(mut prev_keys) =
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
{
let ServerSigningKeys {
verify_keys,
old_verify_keys,
..
} = new_keys;

// Moving `verify_keys` no longer present to `old_verify_keys`
for (key_id, key) in prev_keys.verify_keys {
if !verify_keys.contains_key(&key_id) {
prev_keys
.old_verify_keys
.insert(key_id, OldVerifyKey::new(prev_keys.valid_until_ts, key.key));
}
}

prev_keys.verify_keys = verify_keys;
prev_keys.old_verify_keys.extend(old_verify_keys);
prev_keys.valid_until_ts = new_keys.valid_until_ts;

self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
)?;

prev_keys.into()
} else {
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
)?;

new_keys.into()
},
)
}

/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
fn signing_keys_for(
&self,
origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> {
let signingkeys = self
.server_signingkeys
.get(origin.as_bytes())?
.and_then(|bytes| serde_json::from_slice(&bytes).ok())
.map(|keys: ServerSigningKeys| {
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
tree
})
.unwrap_or_else(BTreeMap::new);
.and_then(|bytes| serde_json::from_slice::<SigningKeys>(&bytes).ok());

Ok(signingkeys)
}
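Note on the hunk above: the two new insert paths treat disappearing keys differently. Keys obtained from a trusted key server are only merged in, while keys fetched from the origin itself replace `verify_keys`, demoting anything no longer advertised into `old_verify_keys`. A simplified sketch of that rotation step, using plain maps and an illustrative `OldKey` stand-in instead of the ruma types (assumptions, not the crate's API):

```rust
use std::collections::BTreeMap;

// Illustrative stand-in for ruma's OldVerifyKey.
#[derive(Clone, Debug)]
struct OldKey {
    expired_ts: u64,
    key: String,
}

fn rotate(
    prev_verify: &mut BTreeMap<String, String>,
    prev_old: &mut BTreeMap<String, OldKey>,
    prev_valid_until_ts: u64,
    new_verify: BTreeMap<String, String>,
) {
    // Any key the origin no longer advertises is demoted to old_verify_keys,
    // stamped with the previous valid_until_ts (mirroring add_signing_key_from_origin).
    for (key_id, key) in std::mem::take(prev_verify) {
        if !new_verify.contains_key(&key_id) {
            prev_old.insert(key_id, OldKey { expired_ts: prev_valid_until_ts, key });
        }
    }
    *prev_verify = new_verify;
}

fn main() {
    let mut verify = BTreeMap::from([("ed25519:a".to_owned(), "k1".to_owned())]);
    let mut old = BTreeMap::new();
    rotate(
        &mut verify,
        &mut old,
        1000,
        BTreeMap::from([("ed25519:b".to_owned(), "k2".to_owned())]),
    );
    assert!(old.contains_key("ed25519:a"));
    assert_eq!(verify.get("ed25519:b").map(String::as_str), Some("k2"));
}
```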
@@ -1,4 +1,4 @@
use ruma::api::client::error::ErrorKind;
use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition};

use crate::{database::KeyValueDatabase, service, utils, Error, Result};

@@ -8,7 +8,7 @@ impl service::media::Data for KeyValueDatabase {
mxc: String,
width: u32,
height: u32,
content_disposition: Option<&str>,
content_disposition: &ContentDisposition,
content_type: Option<&str>,
) -> Result<Vec<u8>> {
let mut key = mxc.as_bytes().to_vec();

@@ -16,12 +16,7 @@ impl service::media::Data for KeyValueDatabase {
key.extend_from_slice(&width.to_be_bytes());
key.extend_from_slice(&height.to_be_bytes());
key.push(0xff);
key.extend_from_slice(
content_disposition
.as_ref()
.map(|f| f.as_bytes())
.unwrap_or_default(),
);
key.extend_from_slice(content_disposition.to_string().as_bytes());
key.push(0xff);
key.extend_from_slice(
content_type

@@ -40,7 +35,7 @@ impl service::media::Data for KeyValueDatabase {
mxc: String,
width: u32,
height: u32,
) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
) -> Result<(ContentDisposition, Option<String>, Vec<u8>)> {
let mut prefix = mxc.as_bytes().to_vec();
prefix.push(0xff);
prefix.extend_from_slice(&width.to_be_bytes());

@@ -68,15 +63,9 @@ impl service::media::Data for KeyValueDatabase {
.next()
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;

let content_disposition = if content_disposition_bytes.is_empty() {
None
} else {
Some(
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
})?,
)
};
let content_disposition = content_disposition_bytes.try_into().unwrap_or_else(|_| {
ContentDisposition::new(ruma::http_headers::ContentDispositionType::Inline)
});
Ok((content_disposition, content_type, key))
}
}
@@ -1,9 +1,15 @@
use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
use ruma::{
api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId,
UserId,
};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::alias::Data for KeyValueDatabase {
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> {
// Comes first as we don't want a stuck alias
self.alias_userid
.insert(alias.alias().as_bytes(), user_id.as_bytes())?;
self.alias_roomid
.insert(alias.alias().as_bytes(), room_id.as_bytes())?;
let mut aliasid = room_id.as_bytes().to_vec();

@@ -22,13 +28,13 @@ impl service::rooms::alias::Data for KeyValueDatabase {
self.aliasid_alias.remove(&key)?;
}
self.alias_roomid.remove(alias.alias().as_bytes())?;
self.alias_userid.remove(alias.alias().as_bytes())
} else {
return Err(Error::BadRequest(
Err(Error::BadRequest(
ErrorKind::NotFound,
"Alias does not exist.",
));
))
}
Ok(())
}

fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {

@@ -57,4 +63,16 @@ impl service::rooms::alias::Data for KeyValueDatabase {
.map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
}))
}

fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>> {
self.alias_userid
.get(alias.alias().as_bytes())?
.map(|bytes| {
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in alias_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in alias_roomid is invalid."))
})
.transpose()
}
}
@@ -1,6 +1,5 @@
mod presence;
mod read_receipt;
mod typing;

use crate::{database::KeyValueDatabase, service};
@@ -1,127 +0,0 @@
use std::{collections::HashSet, mem};

use ruma::{OwnedUserId, RoomId, UserId};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::edus::typing::Data for KeyValueDatabase {
fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff);

let count = services().globals.next_count()?.to_be_bytes();

let mut room_typing_id = prefix;
room_typing_id.extend_from_slice(&timeout.to_be_bytes());
room_typing_id.push(0xff);
room_typing_id.extend_from_slice(&count);

self.typingid_userid
.insert(&room_typing_id, user_id.as_bytes())?;

self.roomid_lasttypingupdate
.insert(room_id.as_bytes(), &count)?;

Ok(())
}

fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff);

let user_id = user_id.to_string();

let mut found_outdated = false;

// Maybe there are multiple ones from calling roomtyping_add multiple times
for outdated_edu in self
.typingid_userid
.scan_prefix(prefix)
.filter(|(_, v)| &**v == user_id.as_bytes())
{
self.typingid_userid.remove(&outdated_edu.0)?;
found_outdated = true;
}

if found_outdated {
self.roomid_lasttypingupdate.insert(
room_id.as_bytes(),
&services().globals.next_count()?.to_be_bytes(),
)?;
}

Ok(())
}

fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff);

let current_timestamp = utils::millis_since_unix_epoch();

let mut found_outdated = false;

// Find all outdated edus before inserting a new one
for outdated_edu in self
.typingid_userid
.scan_prefix(prefix)
.map(|(key, _)| {
Ok::<_, Error>((
key.clone(),
utils::u64_from_bytes(
&key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| {
Error::bad_database("RoomTyping has invalid timestamp or delimiters.")
})?[0..mem::size_of::<u64>()],
)
.map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?,
))
})
.filter_map(|r| r.ok())
.take_while(|&(_, timestamp)| timestamp < current_timestamp)
{
// This is an outdated edu (time > timestamp)
self.typingid_userid.remove(&outdated_edu.0)?;
found_outdated = true;
}

if found_outdated {
self.roomid_lasttypingupdate.insert(
room_id.as_bytes(),
&services().globals.next_count()?.to_be_bytes(),
)?;
}

Ok(())
}

fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
Ok(self
.roomid_lasttypingupdate
.get(room_id.as_bytes())?
.map(|bytes| {
utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
})
})
.transpose()?
.unwrap_or(0))
}

fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>> {
let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff);

let mut user_ids = HashSet::new();

for (_, user_id) in self.typingid_userid.scan_prefix(prefix) {
let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| {
Error::bad_database("User ID in typingid_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?;

user_ids.insert(user_id);
}

Ok(user_ids)
}
}
@@ -2,14 +2,20 @@ use ruma::RoomId;

use crate::{database::KeyValueDatabase, service, services, utils, Result};

impl service::rooms::search::Data for KeyValueDatabase {
fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
let mut batch = message_body
.split_terminator(|c: char| !c.is_alphanumeric())
/// Splits a string into tokens used as keys in the search inverted index
///
/// This may be used to tokenize both message bodies (for indexing) or search
/// queries (for querying).
fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
body.split_terminator(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty())
.filter(|word| word.len() <= 50)
.map(str::to_lowercase)
.map(|word| {
}

impl service::rooms::search::Data for KeyValueDatabase {
fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
let mut batch = tokenize(message_body).map(|word| {
let mut key = shortroomid.to_be_bytes().to_vec();
key.extend_from_slice(word.as_bytes());
key.push(0xff);

@@ -20,6 +26,22 @@ impl service::rooms::search::Data for KeyValueDatabase {
self.tokenids.insert_batch(&mut batch)
}

fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
let batch = tokenize(message_body).map(|word| {
let mut key = shortroomid.to_be_bytes().to_vec();
key.extend_from_slice(word.as_bytes());
key.push(0xFF);
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
key
});

for token in batch {
self.tokenids.remove(&token)?;
}

Ok(())
}

fn search_pdus<'a>(
&'a self,
room_id: &RoomId,

@@ -33,11 +55,7 @@ impl service::rooms::search::Data for KeyValueDatabase {
.to_be_bytes()
.to_vec();

let words: Vec<_> = search_string
.split_terminator(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty())
.map(str::to_lowercase)
.collect();
let words: Vec<_> = tokenize(search_string).collect();

let iterators = words.clone().into_iter().map(move |word| {
let mut prefix2 = prefix.clone();
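Note on the search hunks above: the point of extracting one shared `tokenize` helper for `index_pdu`, `deindex_pdu`, and `search_pdus` is that the index and the query are guaranteed to normalize text identically (alphanumeric splitting, the 50-byte cap, lowercasing). A minimal standalone sketch of that idea, independent of the KV-store types above:

```rust
use std::collections::HashSet;

/// Shared tokenizer sketch (same shape as the diff, not the full crate context):
/// the indexer and the query path must agree, otherwise a search for "Hello"
/// would never match a message indexed as "hello".
fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
}

fn main() {
    let indexed: HashSet<_> = tokenize("Hello, Matrix world!").collect();
    // The query goes through the same function, so casing and punctuation differences disappear.
    assert!(tokenize("hello WORLD").all(|t| indexed.contains(&t)));
}
```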
@@ -157,10 +157,9 @@ impl service::rooms::short::Data for KeyValueDatabase {
.ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;

let event_type =
StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;
})?);

let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| {
Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
@@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
let parsed = services()
.rooms
.state_compressor
.parse_compressed_state_event(&compressed)?;
.parse_compressed_state_event(compressed)?;
result.insert(parsed.0, parsed.1);

i += 1;

@@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
let (_, eventid) = services()
.rooms
.state_compressor
.parse_compressed_state_event(&compressed)?;
.parse_compressed_state_event(compressed)?;
if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
result.insert(
(

@@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
services()
.rooms
.state_compressor
.parse_compressed_state_event(&compressed)
.parse_compressed_state_event(compressed)
.ok()
.map(|(_, id)| id)
}))
@@ -1,13 +1,16 @@
use std::{collections::HashSet, sync::Arc};

use regex::Regex;
use ruma::{
events::{AnyStrippedStateEvent, AnySyncStateEvent},
serde::Raw,
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
use crate::{
database::KeyValueDatabase,
service::{self, appservice::RegistrationInfo},
services, utils, Error, Result,
};

impl service::rooms::state_cache::Data for KeyValueDatabase {
fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {

@@ -184,46 +187,28 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
}

#[tracing::instrument(skip(self, room_id, appservice))]
fn appservice_in_room(
&self,
room_id: &RoomId,
appservice: &(String, serde_yaml::Value),
) -> Result<bool> {
fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result<bool> {
let maybe = self
.appservice_in_room_cache
.read()
.unwrap()
.get(room_id)
.and_then(|map| map.get(&appservice.0))
.and_then(|map| map.get(&appservice.registration.id))
.copied();

if let Some(b) = maybe {
Ok(b)
} else if let Some(namespaces) = appservice.1.get("namespaces") {
let users = namespaces
.get("users")
.and_then(|users| users.as_sequence())
.map_or_else(Vec::new, |users| {
users
.iter()
.filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok())
.collect::<Vec<_>>()
});

let bridge_user_id = appservice
.1
.get("sender_localpart")
.and_then(|string| string.as_str())
.and_then(|string| {
UserId::parse_with_server_name(string, services().globals.server_name()).ok()
});
} else {
let bridge_user_id = UserId::parse_with_server_name(
appservice.registration.sender_localpart.as_str(),
services().globals.server_name(),
)
.ok();

let in_room = bridge_user_id
.map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false))
|| self.room_members(room_id).any(|userid| {
userid.map_or(false, |userid| {
users.iter().any(|r| r.is_match(userid.as_str()))
})
userid.map_or(false, |userid| appservice.users.is_match(userid.as_str()))
});

self.appservice_in_room_cache

@@ -231,11 +216,9 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
.unwrap()
.entry(room_id.to_owned())
.or_default()
.insert(appservice.0.clone(), in_room);
.insert(appservice.registration.id.clone(), in_room);

Ok(in_room)
} else {
Ok(false)
}
}

@@ -471,6 +454,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
}

/// Returns an iterator over all rooms a user was invited to.
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(self))]
fn rooms_invited<'a>(
&'a self,

@@ -549,6 +533,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
}

/// Returns an iterator over all rooms a user left.
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(self))]
fn rooms_left<'a>(
&'a self,
@@ -10,7 +10,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
user_id: &'a UserId,
room_id: &'a RoomId,
until: u64,
include: &'a IncludeThreads,
_include: &'a IncludeThreads,
) -> Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>> {
let prefix = services()
.rooms

@@ -27,7 +27,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
self.threadid_userids
.iter_from(&current, true)
.take_while(move |(k, _)| k.starts_with(&prefix))
.map(move |(pduid, users)| {
.map(move |(pduid, _users)| {
let count = utils::u64_from_bytes(&pduid[(mem::size_of::<u64>())..])
.map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?;
let mut pdu = services()

@@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase {
.collect::<Vec<_>>()
.join(&[0xff][..]);

self.threadid_userids.insert(&root_id, &users)?;
self.threadid_userids.insert(root_id, &users)?;

Ok(())
}

fn get_participants(&self, root_id: &[u8]) -> Result<Option<Vec<OwnedUserId>>> {
if let Some(users) = self.threadid_userids.get(&root_id)? {
if let Some(users) = self.threadid_userids.get(root_id)? {
Ok(Some(
users
.split(|b| *b == 0xff)
@@ -39,11 +39,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase {

/// Returns the `count` of this pdu's id.
fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
Ok(self
.eventid_pduid
self.eventid_pduid
.get(event_id.as_bytes())?
.map(|pdu_id| pdu_count(&pdu_id))
.transpose()?)
.transpose()
}

/// Returns the json of a pdu.

@@ -80,7 +79,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {

/// Returns the pdu's id.
fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
Ok(self.eventid_pduid.get(event_id.as_bytes())?)
self.eventid_pduid.get(event_id.as_bytes())
}

/// Returns the pdu.

@@ -230,7 +229,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
room_id: &RoomId,
until: PduCount,
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
let (prefix, current) = count_to_id(&room_id, until, 1, true)?;
let (prefix, current) = count_to_id(room_id, until, 1, true)?;

let user_id = user_id.to_owned();

@@ -257,7 +256,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
room_id: &RoomId,
from: PduCount,
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
let (prefix, current) = count_to_id(&room_id, from, 1, false)?;
let (prefix, current) = count_to_id(room_id, from, 1, false)?;

let user_id = user_id.to_owned();

@@ -332,7 +331,7 @@ fn count_to_id(
.rooms
.short
.get_shortroomid(room_id)?
.expect("room exists")
.ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))?
.to_be_bytes()
.to_vec();
let mut pdu_id = prefix.clone();
@@ -80,7 +80,7 @@ impl service::uiaa::Data for KeyValueDatabase {
.userdevicesessionid_uiaainfo
.get(&userdevicesessionid)?
.ok_or(Error::BadRequest(
ErrorKind::Forbidden,
ErrorKind::forbidden(),
"UIAA session does not exist.",
))?,
)
@@ -11,6 +11,7 @@ use ruma::{
use tracing::warn;

use crate::{
api::client_server::TOKEN_LENGTH,
database::KeyValueDatabase,
service::{self, users::clean_signatures},
services, utils, Error, Result,

@@ -146,10 +147,9 @@ impl service::users::Data for KeyValueDatabase {
self.userid_avatarurl
.get(user_id.as_bytes())?
.map(|bytes| {
let s = utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
s.try_into()
utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
.map(Into::into)
})
.transpose()
}

@@ -944,6 +944,52 @@ impl service::users::Data for KeyValueDatabase {
Ok(None)
}
}

// Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> {
let token = utils::random_string(TOKEN_LENGTH);

let expires_in = services().globals.config.openid_token_ttl;
let expires_at = utils::millis_since_unix_epoch()
.checked_add(expires_in * 1000)
.expect("time is valid");

let mut value = expires_at.to_be_bytes().to_vec();
value.extend_from_slice(user_id.as_bytes());

self.openidtoken_expiresatuserid
.insert(token.as_bytes(), value.as_slice())?;

Ok((token, expires_in))
}

/// Find out which user an OpenID access token belongs to.
fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>> {
let Some(value) = self.openidtoken_expiresatuserid.get(token.as_bytes())? else {
return Ok(None);
};
let (expires_at_bytes, user_bytes) = value.split_at(0u64.to_be_bytes().len());

let expires_at = u64::from_be_bytes(
expires_at_bytes
.try_into()
.map_err(|_| Error::bad_database("expires_at in openid_userid is invalid u64."))?,
);

if expires_at < utils::millis_since_unix_epoch() {
self.openidtoken_expiresatuserid.remove(token.as_bytes())?;

return Ok(None);
}

Some(
UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| {
Error::bad_database("User ID in openid_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in openid_userid is invalid.")),
)
.transpose()
}
}

impl KeyValueDatabase {}
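Note on the OpenID token hunk above: the stored value is an 8-byte big-endian expiry timestamp (milliseconds) immediately followed by the UTF-8 user ID, which is exactly what `find_from_openid_token` splits apart again. A small round-trip sketch of that layout (helper names here are illustrative, not from the crate):

```rust
// Encode: expiry (u64, big-endian) followed by the raw user ID bytes.
fn encode(expires_at_ms: u64, user_id: &str) -> Vec<u8> {
    let mut value = expires_at_ms.to_be_bytes().to_vec();
    value.extend_from_slice(user_id.as_bytes());
    value
}

// Decode: split off the fixed-size prefix, then interpret the rest as UTF-8.
fn decode(value: &[u8]) -> Option<(u64, &str)> {
    if value.len() < std::mem::size_of::<u64>() {
        return None;
    }
    let (ts, user) = value.split_at(std::mem::size_of::<u64>());
    let expires_at_ms = u64::from_be_bytes(ts.try_into().ok()?);
    Some((expires_at_ms, std::str::from_utf8(user).ok()?))
}

fn main() {
    let v = encode(1_700_000_000_000, "@alice:example.org");
    assert_eq!(decode(&v), Some((1_700_000_000_000, "@alice:example.org")));
}
```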
@@ -6,8 +6,10 @@ use crate::{
SERVICES,
};
use abstraction::{KeyValueDatabaseEngine, KvTree};
use base64::{engine::general_purpose, Engine};
use directories::ProjectDirs;
use lru_cache::LruCache;

use ruma::{
events::{
push_rules::{PushRulesEvent, PushRulesEventContent},

@@ -56,6 +58,7 @@ pub struct KeyValueDatabase {
pub(super) userid_masterkeyid: Arc<dyn KvTree>,
pub(super) userid_selfsigningkeyid: Arc<dyn KvTree>,
pub(super) userid_usersigningkeyid: Arc<dyn KvTree>,
pub(super) openidtoken_expiresatuserid: Arc<dyn KvTree>, // expiresatuserid = expiresat + userid

pub(super) userfilterid_filter: Arc<dyn KvTree>, // UserFilterId = UserId + FilterId

@@ -70,8 +73,6 @@ pub struct KeyValueDatabase {
pub(super) readreceiptid_readreceipt: Arc<dyn KvTree>, // ReadReceiptId = RoomId + Count + UserId
pub(super) roomuserid_privateread: Arc<dyn KvTree>, // RoomUserId = Room + User, PrivateRead = Count
pub(super) roomuserid_lastprivatereadupdate: Arc<dyn KvTree>, // LastPrivateReadUpdate = Count
pub(super) typingid_userid: Arc<dyn KvTree>, // TypingId = RoomId + TimeoutTime + Count
pub(super) roomid_lasttypingupdate: Arc<dyn KvTree>, // LastRoomTypingUpdate = Count
pub(super) presenceid_presence: Arc<dyn KvTree>, // PresenceId = RoomId + Count + UserId
pub(super) userid_lastpresenceupdate: Arc<dyn KvTree>, // LastPresenceUpdate = Count

@@ -101,6 +102,8 @@ pub struct KeyValueDatabase {
pub(super) userroomid_leftstate: Arc<dyn KvTree>,
pub(super) roomuserid_leftcount: Arc<dyn KvTree>,

pub(super) alias_userid: Arc<dyn KvTree>, // User who created the alias

pub(super) disabledroomids: Arc<dyn KvTree>, // Rooms where incoming federation handling is disabled

pub(super) lazyloadedids: Arc<dyn KvTree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId

@@ -162,7 +165,6 @@ pub struct KeyValueDatabase {
//pub pusher: pusher::PushData,
pub(super) senderkey_pusher: Arc<dyn KvTree>,

pub(super) cached_registrations: Arc<RwLock<HashMap<String, serde_yaml::Value>>>,
pub(super) pdu_cache: Mutex<LruCache<OwnedEventId, Arc<PduEvent>>>,
pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,

@@ -239,7 +241,7 @@ impl KeyValueDatabase {
Self::check_db_setup(&config)?;

if !Path::new(&config.database_path).exists() {
std::fs::create_dir_all(&config.database_path)
fs::create_dir_all(&config.database_path)
.map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?;
}

@@ -292,6 +294,7 @@ impl KeyValueDatabase {
userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
openidtoken_expiresatuserid: builder.open_tree("openidtoken_expiresatuserid")?,
userfilterid_filter: builder.open_tree("userfilterid_filter")?,
todeviceid_events: builder.open_tree("todeviceid_events")?,

@@ -301,8 +304,6 @@ impl KeyValueDatabase {
roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt
roomuserid_lastprivatereadupdate: builder
.open_tree("roomuserid_lastprivatereadupdate")?,
typingid_userid: builder.open_tree("typingid_userid")?,
roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?,
presenceid_presence: builder.open_tree("presenceid_presence")?,
userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
pduid_pdu: builder.open_tree("pduid_pdu")?,

@@ -329,6 +330,8 @@ impl KeyValueDatabase {
userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,

alias_userid: builder.open_tree("alias_userid")?,

disabledroomids: builder.open_tree("disabledroomids")?,

lazyloadedids: builder.open_tree("lazyloadedids")?,

@@ -372,7 +375,6 @@ impl KeyValueDatabase {
global: builder.open_tree("global")?,
server_signingkeys: builder.open_tree("server_signingkeys")?,

cached_registrations: Arc::new(RwLock::new(HashMap::new())),
pdu_cache: Mutex::new(LruCache::new(
config
.pdu_cache_capacity

@@ -409,11 +411,9 @@ impl KeyValueDatabase {
// Matrix resource ownership is based on the server name; changing it
// requires recreating the database from scratch.
if services().users.count()? > 0 {
let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
let conduit_user = services().globals.server_user();

if !services().users.exists(&conduit_user)? {
if !services().users.exists(conduit_user)? {
error!(
"The {} server user does not exist, and the database is not new.",
conduit_user

@@ -425,7 +425,7 @@ impl KeyValueDatabase {
}

// If the database has any data, perform data migrations before starting
let latest_database_version = 13;
let latest_database_version = 16;

if services().users.count()? > 0 {
// MIGRATIONS

@@ -851,8 +851,10 @@ impl KeyValueDatabase {
let rule = rules_list.content.get(content_rule_transformation[0]);
if rule.is_some() {
let mut rule = rule.unwrap().clone();
rule.rule_id = content_rule_transformation[1].to_owned();
rules_list.content.remove(content_rule_transformation[0]);
content_rule_transformation[1].clone_into(&mut rule.rule_id);
rules_list
.content
.shift_remove(content_rule_transformation[0]);
rules_list.content.insert(rule);
}
}

@@ -874,8 +876,8 @@ impl KeyValueDatabase {
let rule = rules_list.underride.get(transformation[0]);
if let Some(rule) = rule {
let mut rule = rule.clone();
rule.rule_id = transformation[1].to_owned();
rules_list.underride.remove(transformation[0]);
transformation[1].clone_into(&mut rule.rule_id);
rules_list.underride.shift_remove(transformation[0]);
rules_list.underride.insert(rule);
}
}

@@ -921,7 +923,7 @@ impl KeyValueDatabase {
let mut account_data =
serde_json::from_str::<PushRulesEvent>(raw_rules_list.get()).unwrap();

let user_default_rules = ruma::push::Ruleset::server_default(&user);
let user_default_rules = Ruleset::server_default(&user);
account_data
.content
.global

@@ -940,6 +942,86 @@ impl KeyValueDatabase {
warn!("Migration: 12 -> 13 finished");
}

if services().globals.database_version()? < 16 {
// Reconstruct all media using the filesystem
db.mediaid_file.clear().unwrap();

for file in fs::read_dir(services().globals.get_media_folder()).unwrap() {
let file = file.unwrap();
let mediaid = general_purpose::URL_SAFE_NO_PAD
.decode(file.file_name().into_string().unwrap())
.unwrap();

let mut parts = mediaid.rsplit(|&b| b == 0xff);

let mut removed_bytes = 0;

let content_type_bytes = parts.next().unwrap();
removed_bytes += content_type_bytes.len() + 1;

let content_disposition_bytes = parts
.next()
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
removed_bytes += content_disposition_bytes.len();

let mut content_disposition =
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
Error::bad_database("Content Disposition in mediaid_file is invalid.")
})?;

if content_disposition.contains("filename=")
&& !content_disposition.contains("filename=\"")
{
println!("{}", &content_disposition);
content_disposition =
content_disposition.replacen("filename=", "filename=\"", 1);
content_disposition.push('"');
println!("{}", &content_disposition);

let mut new_key = mediaid[..(mediaid.len() - removed_bytes)].to_vec();
assert!(*new_key.last().unwrap() == 0xff);

let mut shorter_key = new_key.clone();
shorter_key.extend(
ruma::http_headers::ContentDisposition::new(
ruma::http_headers::ContentDispositionType::Inline,
)
.to_string()
.as_bytes(),
);
shorter_key.push(0xff);
shorter_key.extend_from_slice(content_type_bytes);

new_key.extend_from_slice(content_disposition.to_string().as_bytes());
new_key.push(0xff);
new_key.extend_from_slice(content_type_bytes);

// Some file names are too long. Ignore those.
match fs::rename(
services().globals.get_media_file(&mediaid),
services().globals.get_media_file(&new_key),
) {
Ok(_) => {
db.mediaid_file.insert(&new_key, &[])?;
}
Err(_) => {
fs::rename(
services().globals.get_media_file(&mediaid),
services().globals.get_media_file(&shorter_key),
)
.unwrap();
db.mediaid_file.insert(&shorter_key, &[])?;
}
}
} else {
db.mediaid_file.insert(&mediaid, &[])?;
}
}
services().globals.bump_database_version(16)?;

warn!("Migration: 13 -> 16 finished");
}

assert_eq!(
services().globals.database_version().unwrap(),
latest_database_version

@@ -1105,22 +1187,21 @@ impl KeyValueDatabase {

/// Sets the emergency password and push rules for the @conduit account in case emergency password is set
fn set_emergency_access() -> Result<bool> {
let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is a valid UserId");
let conduit_user = services().globals.server_user();

services().users.set_password(
&conduit_user,
conduit_user,
services().globals.emergency_password().as_deref(),
)?;

let (ruleset, res) = match services().globals.emergency_password() {
Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)),
Some(_) => (Ruleset::server_default(conduit_user), Ok(true)),
None => (Ruleset::new(), Ok(false)),
};

services().account_data.update(
None,
&conduit_user,
conduit_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(&GlobalAccountDataEvent {
content: PushRulesEventContent { global: ruleset },
13 src/lib.rs
@@ -1,18 +1,13 @@
#![warn(
rust_2018_idioms,
unused_qualifications,
clippy::cloned_instead_of_copied,
clippy::str_to_string
)]
#![allow(clippy::suspicious_else_formatting)]
#![deny(clippy::dbg_macro)]

pub mod api;
pub mod clap;
mod config;
mod database;
mod service;
mod utils;

// Not async due to services() being used in many closures, and async closures are not stable as of writing
// This is the case for every other occurence of sync Mutex/RwLock, except for database related ones, where
// the current maintainer (Timo) has asked to not modify those
use std::sync::RwLock;

pub use api::ruma_wrapper::{Ruma, RumaResponse};
115 src/main.rs
@ -1,19 +1,11 @@
|
|||
#![warn(
|
||||
rust_2018_idioms,
|
||||
unused_qualifications,
|
||||
clippy::cloned_instead_of_copied,
|
||||
clippy::str_to_string,
|
||||
clippy::future_not_send
|
||||
)]
|
||||
#![allow(clippy::suspicious_else_formatting)]
|
||||
#![deny(clippy::dbg_macro)]
|
||||
|
||||
use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};
|
||||
|
||||
use axum::{
|
||||
body::Body,
|
||||
extract::{DefaultBodyLimit, FromRequestParts, MatchedPath},
|
||||
response::IntoResponse,
|
||||
routing::{get, on, MethodFilter},
|
||||
middleware::map_response,
|
||||
response::{IntoResponse, Response},
|
||||
routing::{any, get, on, MethodFilter},
|
||||
Router,
|
||||
};
|
||||
use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle};
|
||||
|
@ -23,7 +15,7 @@ use figment::{
|
|||
Figment,
|
||||
};
|
||||
use http::{
|
||||
header::{self, HeaderName},
|
||||
header::{self, HeaderName, CONTENT_SECURITY_POLICY},
|
||||
Method, StatusCode, Uri,
|
||||
};
|
||||
use ruma::api::{
|
||||
|
@ -54,6 +46,8 @@ static GLOBAL: Jemalloc = Jemalloc;
|
|||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
clap::parse();
|
||||
|
||||
// Initialize config
|
||||
let raw_config =
|
||||
Figment::new()
|
||||
|
@ -63,7 +57,7 @@ async fn main() {
|
|||
))
|
||||
.nested(),
|
||||
)
|
||||
.merge(Env::prefixed("CONDUIT_").global());
|
||||
.merge(Env::prefixed("CONDUIT_").global().split("__"));
|
||||
|
||||
let config = match raw_config.extract::<Config>() {
|
||||
Ok(s) => s,
|
||||
|
@ -75,18 +69,18 @@ async fn main() {
|
|||
|
||||
config.warn_deprecated();
|
||||
|
||||
let log = format!("{},ruma_state_res=error,_=off,sled=off", config.log);
|
||||
|
||||
if config.allow_jaeger {
|
||||
opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
|
||||
let tracer = opentelemetry_jaeger::new_agent_pipeline()
|
||||
.with_auto_split_batch(true)
|
||||
.with_service_name("conduit")
|
||||
.install_batch(opentelemetry::runtime::Tokio)
|
||||
opentelemetry::global::set_text_map_propagator(
|
||||
opentelemetry_jaeger_propagator::Propagator::new(),
|
||||
);
|
||||
let tracer = opentelemetry_otlp::new_pipeline()
|
||||
.tracing()
|
||||
.with_exporter(opentelemetry_otlp::new_exporter().tonic())
|
||||
.install_batch(opentelemetry_sdk::runtime::Tokio)
|
||||
.unwrap();
|
||||
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
|
||||
|
||||
let filter_layer = match EnvFilter::try_new(&log) {
|
||||
let filter_layer = match EnvFilter::try_new(&config.log) {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
eprintln!(
|
||||
|
@ -113,7 +107,7 @@ async fn main() {
|
|||
} else {
|
||||
let registry = tracing_subscriber::Registry::default();
|
||||
let fmt_layer = tracing_subscriber::fmt::Layer::new();
|
||||
let filter_layer = match EnvFilter::try_new(&log) {
|
||||
let filter_layer = match EnvFilter::try_new(&config.log) {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}");
|
||||
|
@ -151,6 +145,13 @@ async fn main() {
|
|||
}
|
||||
}
|
||||
|
||||
/// Adds additional headers to prevent any potential XSS attacks via the media repo
|
||||
async fn set_csp_header(response: Response) -> impl IntoResponse {
|
||||
(
|
||||
[(CONTENT_SECURITY_POLICY, "sandbox; default-src 'none'; script-src 'none'; plugin-types application/pdf; style-src 'unsafe-inline'; object-src 'self';")], response
|
||||
)
|
||||
}
|
||||
|
||||
async fn run_server() -> io::Result<()> {
let config = &services().globals.config;
let addr = SocketAddr::from((config.address, config.port));
@@ -191,6 +192,7 @@ async fn run_server() -> io::Result<()> {
])
.max_age(Duration::from_secs(86400)),
)
.layer(map_response(set_csp_header))
.layer(DefaultBodyLimit::max(
config
.max_request_size
@@ -198,7 +200,7 @@ async fn run_server() -> io::Result<()> {
.expect("failed to convert max request size"),
));

let app = routes().layer(middlewares).into_make_service();
let app = routes(config).layer(middlewares).into_make_service();
let handle = ServerHandle::new();

tokio::spawn(shutdown_signal(handle.clone()));
@@ -226,10 +228,10 @@ async fn run_server() -> io::Result<()> {
Ok(())
}

async fn spawn_task<B: Send + 'static>(
req: axum::http::Request<B>,
next: axum::middleware::Next<B>,
) -> std::result::Result<axum::response::Response, StatusCode> {
async fn spawn_task(
req: http::Request<Body>,
next: axum::middleware::Next,
) -> std::result::Result<Response, StatusCode> {
if services().globals.shutdown.load(atomic::Ordering::Relaxed) {
return Err(StatusCode::SERVICE_UNAVAILABLE);
}
@@ -238,14 +240,14 @@ async fn spawn_task<B: Send + 'static>(
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
}

async fn unrecognized_method<B>(
req: axum::http::Request<B>,
next: axum::middleware::Next<B>,
) -> std::result::Result<axum::response::Response, StatusCode> {
async fn unrecognized_method(
req: http::Request<Body>,
next: axum::middleware::Next,
) -> std::result::Result<Response, StatusCode> {
let method = req.method().clone();
let uri = req.uri().clone();
let inner = next.run(req).await;
if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED {
if inner.status() == StatusCode::METHOD_NOT_ALLOWED {
warn!("Method not allowed: {method} {uri}");
return Ok(RumaResponse(UiaaResponse::MatrixError(RumaError {
body: ErrorBody::Standard {
@@ -259,8 +261,8 @@ async fn unrecognized_method<B>(
Ok(inner)
}
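The paired old/new signatures above are the middleware migration in miniature: `Next` loses its body-type parameter and requests become plain `http::Request<Body>`, which matches axum 0.7's `from_fn` middleware shape. A compilable sketch under that assumption; `reject_during_shutdown` and the hard-coded `shutting_down` flag are placeholders for the real `services().globals.shutdown` check:

```rust
// Sketch of an axum 0.7-style middleware function (assumed version).
use axum::{
    body::Body,
    http::{Request, StatusCode},
    middleware::Next,
    response::Response,
};

async fn reject_during_shutdown(
    req: Request<Body>,
    next: Next,
) -> Result<Response, StatusCode> {
    // Placeholder for services().globals.shutdown.load(...)
    let shutting_down = false;
    if shutting_down {
        return Err(StatusCode::SERVICE_UNAVAILABLE);
    }
    Ok(next.run(req).await)
}
```

A function with this shape is typically layered onto the router with `axum::middleware::from_fn`.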
fn routes() -> Router {
Router::new()
fn routes(config: &Config) -> Router {
let router = Router::new()
.ruma_route(client_server::get_supported_versions_route)
.ruma_route(client_server::get_register_available_route)
.ruma_route(client_server::register_route)
@@ -287,6 +289,7 @@ fn routes() -> Router {
.ruma_route(client_server::get_room_aliases_route)
.ruma_route(client_server::get_filter_route)
.ruma_route(client_server::create_filter_route)
.ruma_route(client_server::create_openid_token_route)
.ruma_route(client_server::set_global_account_data_route)
.ruma_route(client_server::set_room_account_data_route)
.ruma_route(client_server::get_global_account_data_route)
@@ -376,10 +379,14 @@ fn routes() -> Router {
.ruma_route(client_server::turn_server_route)
.ruma_route(client_server::send_event_to_device_route)
.ruma_route(client_server::get_media_config_route)
.ruma_route(client_server::get_media_config_auth_route)
.ruma_route(client_server::create_content_route)
.ruma_route(client_server::get_content_route)
.ruma_route(client_server::get_content_auth_route)
.ruma_route(client_server::get_content_as_filename_route)
.ruma_route(client_server::get_content_as_filename_auth_route)
.ruma_route(client_server::get_content_thumbnail_route)
.ruma_route(client_server::get_content_thumbnail_auth_route)
.ruma_route(client_server::get_devices_route)
.ruma_route(client_server::get_device_route)
.ruma_route(client_server::update_device_route)
@@ -400,6 +407,20 @@ fn routes() -> Router {
.ruma_route(client_server::get_relating_events_with_rel_type_route)
.ruma_route(client_server::get_relating_events_route)
.ruma_route(client_server::get_hierarchy_route)
.ruma_route(client_server::well_known_client)
.route(
"/_matrix/client/r0/rooms/:room_id/initialSync",
get(initial_sync),
)
.route(
"/_matrix/client/v3/rooms/:room_id/initialSync",
get(initial_sync),
)
.route("/", get(it_works))
.fallback(not_found);

if config.allow_federation {
router
.ruma_route(server_server::get_server_version_route)
.route(
"/_matrix/key/v2/server",
@@ -423,20 +444,20 @@ fn routes() -> Router {
.ruma_route(server_server::create_join_event_v2_route)
.ruma_route(server_server::create_invite_route)
.ruma_route(server_server::get_devices_route)
.ruma_route(server_server::get_content_route)
.ruma_route(server_server::get_content_thumbnail_route)
.ruma_route(server_server::get_room_information_route)
.ruma_route(server_server::get_profile_information_route)
.ruma_route(server_server::get_keys_route)
.ruma_route(server_server::claim_keys_route)
.route(
"/_matrix/client/r0/rooms/:room_id/initialSync",
get(initial_sync),
)
.route(
"/_matrix/client/v3/rooms/:room_id/initialSync",
get(initial_sync),
)
.route("/", get(it_works))
.fallback(not_found)
.ruma_route(server_server::get_openid_userinfo_route)
.ruma_route(server_server::well_known_server)
} else {
router
.route("/_matrix/federation/*path", any(federation_disabled))
.route("/_matrix/key/*path", any(federation_disabled))
.route("/.well-known/matrix/server", any(federation_disabled))
}
}
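`routes` now takes the config so the router can be assembled differently depending on `allow_federation`: build the client-server routes once, then either chain the federation endpoints or stub them out. A stripped-down sketch of that branching; `Config`, the local `federation_disabled` handler, and the empty route list are simplified stand-ins for the real types in the diff:

```rust
// Sketch only: conditional router composition, assuming axum 0.7 route syntax.
use axum::{routing::any, Router};

struct Config {
    allow_federation: bool,
}

// Stand-in for the real handler, which returns a Matrix error body.
async fn federation_disabled() -> &'static str {
    "Federation is disabled."
}

fn routes(config: &Config) -> Router {
    // Client-server routes would be chained here.
    let router = Router::new();

    if config.allow_federation {
        // Federation and key endpoints would be chained here.
        router
    } else {
        router
            .route("/_matrix/federation/*path", any(federation_disabled))
            .route("/_matrix/key/*path", any(federation_disabled))
            .route("/.well-known/matrix/server", any(federation_disabled))
    }
}
```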
async fn shutdown_signal(handle: ServerHandle) {
@@ -473,6 +494,10 @@ async fn shutdown_signal(handle: ServerHandle) {
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]);
}

async fn federation_disabled(_: Uri) -> impl IntoResponse {
Error::bad_config("Federation is disabled.")
}

async fn not_found(uri: Uri) -> impl IntoResponse {
warn!("Not found: {uri}");
Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request")
@@ -1,13 +1,9 @@
use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::{Arc, RwLock},
time::Instant,
};
use std::{collections::BTreeMap, convert::TryFrom, sync::Arc, time::Instant};

use clap::Parser;
use regex::Regex;
use ruma::{
api::appservice::Registration,
events::{
room::{
canonical_alias::RoomCanonicalAliasEventContent,
@@ -23,10 +19,11 @@ use ruma::{
},
TimelineEventType,
},
EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
EventId, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId,
RoomVersionId, ServerName, UserId,
};
use serde_json::value::to_raw_value;
use tokio::sync::{mpsc, Mutex, MutexGuard};
use tokio::sync::{mpsc, Mutex, RwLock};

use crate::{
api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH},
@@ -50,7 +47,7 @@ enum AdminCommand {
/// Registering a new bridge using the ID of an existing bridge will replace
/// the old one.
///
/// [commandbody]
/// [commandbody]()
/// # ```
/// # yaml content here
/// # ```
@@ -76,6 +73,12 @@ enum AdminCommand {
/// List all rooms we are currently handling an incoming pdu from
IncomingFederation,

/// Removes an alias from the server
RemoveAlias {
/// The alias to be removed
alias: Box<RoomAliasId>,
},

/// Deactivate a user
///
/// User will not be removed from all rooms by default.
@@ -96,7 +99,7 @@ enum AdminCommand {
/// Removing a mass amount of users from a room may cause a significant amount of leave events.
/// The time to leave rooms may depend significantly on joined rooms and servers.
///
/// [commandbody]
/// [commandbody]()
/// # ```
/// # User list here
/// # ```
@@ -121,7 +124,7 @@ enum AdminCommand {
/// The PDU event is only checked for validity and is not added to the
/// database.
///
/// [commandbody]
/// [commandbody]()
/// # ```
/// # PDU json content here
/// # ```
@@ -159,24 +162,23 @@ enum AdminCommand {
password: Option<String>,
},

/// Temporarily toggle user registration by passing either true or false as an argument, does not persist between restarts
AllowRegistration { status: Option<bool> },

/// Disables incoming federation handling for a room.
DisableRoom { room_id: Box<RoomId> },
/// Enables incoming federation handling for a room again.
EnableRoom { room_id: Box<RoomId> },

/// Verify json signatures
/// [commandbody]
/// # ```
/// # json here
/// # ```
/// Sign a json object using Conduit's signing keys, putting the json in a codeblock
SignJson,

/// Verify json signatures
/// [commandbody]
/// # ```
/// # json here
/// # ```
/// Verify json signatures, putting the json in a codeblock
VerifyJson,

/// Parses a JSON object as an event then creates a hash and signs it, putting a room
/// version as an argument, and the json in a codeblock
HashAndSignEvent { room_version_id: RoomVersionId },
}
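The enum above derives `clap::Parser` (see the `use clap::Parser;` import earlier), so admin-room messages are parsed like a tiny command line. A hypothetical, self-contained illustration of that mechanism; `DemoAdminCommand`, `parse_admin_line`, and the `#[command]` attributes are invented and much smaller than the real enum:

```rust
// Sketch only: clap 4 derive parsing of chat-style admin commands (assumed setup).
use clap::Parser;

#[derive(Parser, Debug)]
#[command(name = "admin", no_binary_name = true)]
enum DemoAdminCommand {
    /// Temporarily toggle user registration
    AllowRegistration { status: Option<bool> },
    /// Remove an alias from the server
    RemoveAlias { alias: String },
}

fn parse_admin_line(line: &str) -> Result<DemoAdminCommand, clap::Error> {
    // Tokenize the message the same way a shell would split arguments.
    DemoAdminCommand::try_parse_from(line.split_whitespace())
}

fn main() {
    match parse_admin_line("allow-registration true") {
        Ok(cmd) => println!("parsed: {cmd:?}"),
        Err(e) => eprintln!("could not parse command: {e}"),
    }
}
```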
#[derive(Debug)]
|
||||
|
@ -211,41 +213,9 @@ impl Service {
|
|||
// TODO: Use futures when we have long admin commands
|
||||
//let mut futures = FuturesUnordered::new();
|
||||
|
||||
let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
|
||||
.expect("@conduit:server_name is valid");
|
||||
|
||||
let conduit_room = services()
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(
|
||||
format!("#admins:{}", services().globals.server_name())
|
||||
.as_str()
|
||||
.try_into()
|
||||
.expect("#admins:server_name is a valid room alias"),
|
||||
)
|
||||
.expect("Database data for admin room alias must be valid")
|
||||
.expect("Admin room must exist");
|
||||
|
||||
let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| {
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMessage,
|
||||
content: to_raw_value(&message)
|
||||
.expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: None,
|
||||
redacts: None,
|
||||
},
|
||||
&conduit_user,
|
||||
&conduit_room,
|
||||
mutex_lock,
|
||||
)
|
||||
.unwrap();
|
||||
};
|
||||
let conduit_user = services().globals.server_user();
|
||||
|
||||
if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some(event) = receiver.recv() => {
|
||||
|
@ -258,16 +228,32 @@ impl Service {
|
|||
services().globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(conduit_room.to_owned())
|
||||
.or_default(),
|
||||
);
|
||||
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
send_message(message_content, &state_lock);
|
||||
|
||||
drop(state_lock);
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMessage,
|
||||
content: to_raw_value(&message_content)
|
||||
.expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: None,
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
conduit_user,
|
||||
&conduit_room,
|
||||
&state_lock,
|
||||
)
|
||||
.await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -351,10 +337,9 @@ impl Service {
|
|||
if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
|
||||
{
|
||||
let appservice_config = body[1..body.len() - 1].join("\n");
|
||||
let parsed_config =
|
||||
serde_yaml::from_str::<serde_yaml::Value>(&appservice_config);
|
||||
let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config);
|
||||
match parsed_config {
|
||||
Ok(yaml) => match services().appservice.register_appservice(yaml) {
|
||||
Ok(yaml) => match services().appservice.register_appservice(yaml).await {
|
||||
Ok(id) => RoomMessageEventContent::text_plain(format!(
|
||||
"Appservice registered with ID: {id}."
|
||||
)),
|
||||
|
@ -377,6 +362,7 @@ impl Service {
|
|||
} => match services()
|
||||
.appservice
|
||||
.unregister_appservice(&appservice_identifier)
|
||||
.await
|
||||
{
|
||||
Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."),
|
||||
Err(e) => RoomMessageEventContent::text_plain(format!(
|
||||
|
@ -384,25 +370,13 @@ impl Service {
|
|||
)),
|
||||
},
|
||||
AdminCommand::ListAppservices => {
|
||||
if let Ok(appservices) = services()
|
||||
.appservice
|
||||
.iter_ids()
|
||||
.map(|ids| ids.collect::<Vec<_>>())
|
||||
{
|
||||
let count = appservices.len();
|
||||
let appservices = services().appservice.iter_ids().await;
|
||||
let output = format!(
|
||||
"Appservices ({}): {}",
|
||||
count,
|
||||
appservices
|
||||
.into_iter()
|
||||
.filter_map(|r| r.ok())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
appservices.len(),
|
||||
appservices.join(", ")
|
||||
);
|
||||
RoomMessageEventContent::text_plain(output)
|
||||
} else {
|
||||
RoomMessageEventContent::text_plain("Failed to get appservices.")
|
||||
}
|
||||
}
|
||||
AdminCommand::ListRooms => {
|
||||
let room_ids = services().rooms.metadata.iter_ids();
|
||||
|
@ -434,11 +408,7 @@ impl Service {
|
|||
Err(e) => RoomMessageEventContent::text_plain(e.to_string()),
|
||||
},
|
||||
AdminCommand::IncomingFederation => {
|
||||
let map = services()
|
||||
.globals
|
||||
.roomid_federationhandletime
|
||||
.read()
|
||||
.unwrap();
|
||||
let map = services().globals.roomid_federationhandletime.read().await;
|
||||
let mut msg: String = format!("Handling {} incoming pdus:\n", map.len());
|
||||
|
||||
for (r, (e, i)) in map.iter() {
|
||||
|
@ -552,7 +522,7 @@ impl Service {
|
|||
}
|
||||
}
|
||||
AdminCommand::MemoryUsage => {
|
||||
let response1 = services().memory_usage();
|
||||
let response1 = services().memory_usage().await;
|
||||
let response2 = services().globals.db.memory_usage();
|
||||
|
||||
RoomMessageEventContent::text_plain(format!(
|
||||
|
@ -565,7 +535,7 @@ impl Service {
|
|||
RoomMessageEventContent::text_plain("Done.")
|
||||
}
|
||||
AdminCommand::ClearServiceCaches { amount } => {
|
||||
services().clear_caches(amount);
|
||||
services().clear_caches(amount).await;
|
||||
|
||||
RoomMessageEventContent::text_plain("Done.")
|
||||
}
|
||||
|
@ -586,6 +556,13 @@ impl Service {
|
|||
}
|
||||
};
|
||||
|
||||
// Checks if user is local
|
||||
if user_id.server_name() != services().globals.server_name() {
|
||||
return Ok(RoomMessageEventContent::text_plain(
|
||||
"The specified user is not from this server!",
|
||||
));
|
||||
};
|
||||
|
||||
// Check if the specified user is valid
|
||||
if !services().users.exists(&user_id)?
|
||||
|| user_id
|
||||
|
@ -629,6 +606,14 @@ impl Service {
|
|||
)))
|
||||
}
|
||||
};
|
||||
|
||||
// Checks if user is local
|
||||
if user_id.server_name() != services().globals.server_name() {
|
||||
return Ok(RoomMessageEventContent::text_plain(
|
||||
"The specified user is not from this server!",
|
||||
));
|
||||
};
|
||||
|
||||
if user_id.is_historical() {
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"Userid {user_id} is not allowed due to historical"
|
||||
|
@ -676,6 +661,24 @@ impl Service {
|
|||
"Created user with user_id: {user_id} and password: {password}"
|
||||
))
|
||||
}
|
||||
AdminCommand::AllowRegistration { status } => {
|
||||
if let Some(status) = status {
|
||||
services().globals.set_registration(status).await;
|
||||
RoomMessageEventContent::text_plain(if status {
|
||||
"Registration is now enabled"
|
||||
} else {
|
||||
"Registration is now disabled"
|
||||
})
|
||||
} else {
|
||||
RoomMessageEventContent::text_plain(
|
||||
if services().globals.allow_registration().await {
|
||||
"Registration is currently enabled"
|
||||
} else {
|
||||
"Registration is currently disabled"
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
AdminCommand::DisableRoom { room_id } => {
|
||||
services().rooms.metadata.disable_room(&room_id, true)?;
|
||||
RoomMessageEventContent::text_plain("Room disabled.")
|
||||
|
@ -689,7 +692,15 @@ impl Service {
|
|||
user_id,
|
||||
} => {
|
||||
let user_id = Arc::<UserId>::from(user_id);
|
||||
if services().users.exists(&user_id)? {
|
||||
if !services().users.exists(&user_id)? {
|
||||
RoomMessageEventContent::text_plain(format!(
|
||||
"User {user_id} doesn't exist on this server"
|
||||
))
|
||||
} else if user_id.server_name() != services().globals.server_name() {
|
||||
RoomMessageEventContent::text_plain(format!(
|
||||
"User {user_id} is not from this server"
|
||||
))
|
||||
} else {
|
||||
RoomMessageEventContent::text_plain(format!(
|
||||
"Making {user_id} leave all rooms before deactivation..."
|
||||
));
|
||||
|
@ -703,30 +714,76 @@ impl Service {
|
|||
RoomMessageEventContent::text_plain(format!(
|
||||
"User {user_id} has been deactivated"
|
||||
))
|
||||
} else {
|
||||
RoomMessageEventContent::text_plain(format!(
|
||||
"User {user_id} doesn't exist on this server"
|
||||
))
|
||||
}
|
||||
}
|
||||
AdminCommand::DeactivateAll { leave_rooms, force } => {
|
||||
if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
|
||||
{
|
||||
let usernames = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
|
||||
let users = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
|
||||
|
||||
let mut user_ids: Vec<&UserId> = Vec::new();
|
||||
let mut user_ids = Vec::new();
|
||||
let mut remote_ids = Vec::new();
|
||||
let mut non_existant_ids = Vec::new();
|
||||
let mut invalid_users = Vec::new();
|
||||
|
||||
for &username in &usernames {
|
||||
match <&UserId>::try_from(username) {
|
||||
Ok(user_id) => user_ids.push(user_id),
|
||||
for &user in &users {
|
||||
match <&UserId>::try_from(user) {
|
||||
Ok(user_id) => {
|
||||
if user_id.server_name() != services().globals.server_name() {
|
||||
remote_ids.push(user_id)
|
||||
} else if !services().users.exists(user_id)? {
|
||||
non_existant_ids.push(user_id)
|
||||
} else {
|
||||
user_ids.push(user_id)
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||
"{username} is not a valid username"
|
||||
)))
|
||||
invalid_users.push(user);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut markdown_message = String::new();
|
||||
let mut html_message = String::new();
|
||||
if !invalid_users.is_empty() {
|
||||
markdown_message.push_str("The following user ids are not valid:\n```\n");
|
||||
html_message.push_str("The following user ids are not valid:\n<pre>\n");
|
||||
for invalid_user in invalid_users {
|
||||
markdown_message.push_str(&format!("{invalid_user}\n"));
|
||||
html_message.push_str(&format!("{invalid_user}\n"));
|
||||
}
|
||||
markdown_message.push_str("```\n\n");
|
||||
html_message.push_str("</pre>\n\n");
|
||||
}
|
||||
if !remote_ids.is_empty() {
|
||||
markdown_message
|
||||
.push_str("The following users are not from this server:\n```\n");
|
||||
html_message
|
||||
.push_str("The following users are not from this server:\n<pre>\n");
|
||||
for remote_id in remote_ids {
|
||||
markdown_message.push_str(&format!("{remote_id}\n"));
|
||||
html_message.push_str(&format!("{remote_id}\n"));
|
||||
}
|
||||
markdown_message.push_str("```\n\n");
|
||||
html_message.push_str("</pre>\n\n");
|
||||
}
|
||||
if !non_existant_ids.is_empty() {
|
||||
markdown_message.push_str("The following users do not exist:\n```\n");
|
||||
html_message.push_str("The following users do not exist:\n<pre>\n");
|
||||
for non_existant_id in non_existant_ids {
|
||||
markdown_message.push_str(&format!("{non_existant_id}\n"));
|
||||
html_message.push_str(&format!("{non_existant_id}\n"));
|
||||
}
|
||||
markdown_message.push_str("```\n\n");
|
||||
html_message.push_str("</pre>\n\n");
|
||||
}
|
||||
if !markdown_message.is_empty() {
|
||||
return Ok(RoomMessageEventContent::text_html(
|
||||
markdown_message,
|
||||
html_message,
|
||||
));
|
||||
}
|
||||
|
||||
let mut deactivation_count = 0;
|
||||
let mut admins = Vec::new();
|
||||
|
||||
|
@ -803,15 +860,46 @@ impl Service {
|
|||
services()
|
||||
.rooms
|
||||
.event_handler
|
||||
// Generally we shouldn't be checking against expired keys unless required, so in the admin
|
||||
// room it might be best to not allow expired keys
|
||||
.fetch_required_signing_keys(&value, &pub_key_map)
|
||||
.await?;
|
||||
|
||||
let pub_key_map = pub_key_map.read().unwrap();
|
||||
match ruma::signatures::verify_json(&pub_key_map, &value) {
|
||||
Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
|
||||
Err(e) => RoomMessageEventContent::text_plain(format!(
|
||||
let mut expired_key_map = BTreeMap::new();
|
||||
let mut valid_key_map = BTreeMap::new();
|
||||
|
||||
for (server, keys) in pub_key_map.into_inner().into_iter() {
|
||||
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
|
||||
valid_key_map.insert(
|
||||
server,
|
||||
keys.verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id, key.key))
|
||||
.collect(),
|
||||
);
|
||||
} else {
|
||||
expired_key_map.insert(
|
||||
server,
|
||||
keys.verify_keys
|
||||
.into_iter()
|
||||
.map(|(id, key)| (id, key.key))
|
||||
.collect(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if ruma::signatures::verify_json(&valid_key_map, &value).is_ok() {
|
||||
RoomMessageEventContent::text_plain("Signature correct")
|
||||
} else if let Err(e) =
|
||||
ruma::signatures::verify_json(&expired_key_map, &value)
|
||||
{
|
||||
RoomMessageEventContent::text_plain(format!(
|
||||
"Signature verification failed: {e}"
|
||||
)),
|
||||
))
|
||||
} else {
|
||||
RoomMessageEventContent::text_plain(
|
||||
"Signature correct (with expired keys)",
|
||||
)
|
||||
}
|
||||
}
|
||||
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
|
||||
|
@ -822,6 +910,61 @@ impl Service {
|
|||
)
|
||||
}
|
||||
}
|
||||
AdminCommand::HashAndSignEvent { room_version_id } => {
|
||||
if body.len() > 2
|
||||
// Language may be specified as part of the codeblock (e.g. "```json")
|
||||
&& body[0].trim().starts_with("```")
|
||||
&& body.last().unwrap().trim() == "```"
|
||||
{
|
||||
let string = body[1..body.len() - 1].join("\n");
|
||||
match serde_json::from_str(&string) {
|
||||
Ok(mut value) => {
|
||||
if let Err(e) = ruma::signatures::hash_and_sign_event(
|
||||
services().globals.server_name().as_str(),
|
||||
services().globals.keypair(),
|
||||
&mut value,
|
||||
&room_version_id,
|
||||
) {
|
||||
RoomMessageEventContent::text_plain(format!("Invalid event: {e}"))
|
||||
} else {
|
||||
let json_text = serde_json::to_string_pretty(&value)
|
||||
.expect("canonical json is valid json");
|
||||
RoomMessageEventContent::text_plain(json_text)
|
||||
}
|
||||
}
|
||||
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
|
||||
}
|
||||
} else {
|
||||
RoomMessageEventContent::text_plain(
|
||||
"Expected code block in command body. Add --help for details.",
|
||||
)
|
||||
}
|
||||
}
|
||||
AdminCommand::RemoveAlias { alias } => {
|
||||
if alias.server_name() != services().globals.server_name() {
|
||||
RoomMessageEventContent::text_plain(
|
||||
"Cannot remove alias which is not from this server",
|
||||
)
|
||||
} else if services()
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(&alias)?
|
||||
.is_none()
|
||||
{
|
||||
RoomMessageEventContent::text_plain("No such alias exists")
|
||||
} else {
|
||||
// We execute this as the server user for two reasons
|
||||
// 1. If the user can execute commands in the admin room, they can always remove the alias.
|
||||
// 2. In the future, we are likely going to be able to allow users to execute commands via
|
||||
// other methods, such as IPC, which would lead to us not knowing their user id
|
||||
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.remove_alias(&alias, services().globals.server_user())?;
|
||||
RoomMessageEventContent::text_plain("Alias removed sucessfully")
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok(reply_message_content)
|
||||
|
@ -858,12 +1001,15 @@ impl Service {
|
|||
.expect("Regex compilation should not fail");
|
||||
let text = re.replace_all(&text, "<code>$1</code>: $4");
|
||||
|
||||
// Look for a `[commandbody]` tag. If it exists, use all lines below it that
|
||||
// Look for a `[commandbody]()` tag. If it exists, use all lines below it that
|
||||
// start with a `#` in the USAGE section.
|
||||
let mut text_lines: Vec<&str> = text.lines().collect();
|
||||
let mut command_body = String::new();
|
||||
|
||||
if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") {
|
||||
if let Some(line_index) = text_lines
|
||||
.iter()
|
||||
.position(|line| *line == "[commandbody]()")
|
||||
{
|
||||
text_lines.remove(line_index);
|
||||
|
||||
while text_lines
|
||||
|
@ -919,40 +1065,60 @@ impl Service {
|
|||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
// Create a user for the server
|
||||
let conduit_user =
|
||||
UserId::parse_with_server_name("conduit", services().globals.server_name())
|
||||
.expect("@conduit:server_name is valid");
|
||||
let conduit_user = services().globals.server_user();
|
||||
|
||||
services().users.create(&conduit_user, None)?;
|
||||
services().users.create(conduit_user, None)?;
|
||||
|
||||
let mut content = RoomCreateEventContent::new_v1(conduit_user.clone());
|
||||
let room_version = services().globals.default_room_version();
|
||||
let mut content = match room_version {
|
||||
RoomVersionId::V1
|
||||
| RoomVersionId::V2
|
||||
| RoomVersionId::V3
|
||||
| RoomVersionId::V4
|
||||
| RoomVersionId::V5
|
||||
| RoomVersionId::V6
|
||||
| RoomVersionId::V7
|
||||
| RoomVersionId::V8
|
||||
| RoomVersionId::V9
|
||||
| RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.to_owned()),
|
||||
RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
|
||||
_ => unreachable!("Validity of room version already checked"),
|
||||
};
|
||||
content.federate = true;
|
||||
content.predecessor = None;
|
||||
content.room_version = services().globals.default_room_version();
|
||||
content.room_version = room_version;
|
||||
|
||||
// 1. The room create event
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomCreate,
|
||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 2. Make conduit bot join
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
|
@ -969,17 +1135,22 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some(conduit_user.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 3. Power levels
|
||||
let mut users = BTreeMap::new();
|
||||
users.insert(conduit_user.clone(), 100.into());
|
||||
users.insert(conduit_user.to_owned(), 100.into());
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomPowerLevels,
|
||||
content: to_raw_value(&RoomPowerLevelsEventContent {
|
||||
|
@ -990,14 +1161,19 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 4.1 Join Rules
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomJoinRules,
|
||||
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
|
||||
|
@ -1005,14 +1181,19 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 4.2 History Visibility
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomHistoryVisibility,
|
||||
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
||||
|
@ -1022,44 +1203,61 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 4.3 Guest Access
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomGuestAccess,
|
||||
content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
|
||||
content: to_raw_value(&RoomGuestAccessEventContent::new(
|
||||
GuestAccess::Forbidden,
|
||||
))
|
||||
.expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 5. Events implied by name and topic
|
||||
let room_name = format!("{} Admin Room", services().globals.server_name());
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomName,
|
||||
content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
|
||||
content: to_raw_value(&RoomNameEventContent::new(room_name))
|
||||
.expect("event is valid, we just created it"),
|
||||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomTopic,
|
||||
content: to_raw_value(&RoomTopicEventContent {
|
||||
|
@ -1069,18 +1267,21 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 6. Room alias
|
||||
let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
|
||||
.try_into()
|
||||
.expect("#admins:server_name is a valid alias name");
|
||||
let alias: OwnedRoomAliasId = services().globals.admin_alias().to_owned();
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomCanonicalAlias,
|
||||
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
||||
|
@ -1091,17 +1292,32 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
services().rooms.alias.set_alias(&alias, &room_id)?;
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.set_alias(&alias, &room_id, conduit_user)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Gets the room ID of the admin room
|
||||
///
|
||||
/// Errors are propagated from the database, and will have None if there is no admin room
|
||||
pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
|
||||
services()
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(services().globals.admin_alias())
|
||||
}
|
||||
|
||||
/// Invite the user to the conduit admin room.
|
||||
///
|
||||
/// In conduit, this is equivalent to granting admin privileges.
|
||||
|
@ -1110,34 +1326,26 @@ impl Service {
|
|||
user_id: &UserId,
|
||||
displayname: String,
|
||||
) -> Result<()> {
|
||||
let admin_room_alias: Box<RoomAliasId> =
|
||||
format!("#admins:{}", services().globals.server_name())
|
||||
.try_into()
|
||||
.expect("#admins:server_name is a valid alias name");
|
||||
let room_id = services()
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_local_alias(&admin_room_alias)?
|
||||
.expect("Admin room must exist");
|
||||
|
||||
if let Some(room_id) = services().admin.get_admin_room()? {
|
||||
let mutex_state = Arc::clone(
|
||||
services()
|
||||
.globals
|
||||
.roomid_mutex_state
|
||||
.write()
|
||||
.unwrap()
|
||||
.await
|
||||
.entry(room_id.clone())
|
||||
.or_default(),
|
||||
);
|
||||
let state_lock = mutex_state.lock().await;
|
||||
|
||||
// Use the server user to grant the new admin's power level
|
||||
let conduit_user =
|
||||
UserId::parse_with_server_name("conduit", services().globals.server_name())
|
||||
.expect("@conduit:server_name is valid");
|
||||
let conduit_user = services().globals.server_user();
|
||||
|
||||
// Invite and join the real user
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
|
@ -1154,12 +1362,17 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
)
|
||||
.await?;
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomMember,
|
||||
content: to_raw_value(&RoomMemberEventContent {
|
||||
|
@ -1176,18 +1389,23 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some(user_id.to_string()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
user_id,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Set power level
|
||||
let mut users = BTreeMap::new();
|
||||
users.insert(conduit_user.to_owned(), 100.into());
|
||||
users.insert(user_id.to_owned(), 100.into());
|
||||
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
services()
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
PduBuilder {
|
||||
event_type: TimelineEventType::RoomPowerLevels,
|
||||
content: to_raw_value(&RoomPowerLevelsEventContent {
|
||||
|
@ -1198,11 +1416,13 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: Some("".to_owned()),
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Send welcome message
|
||||
services().rooms.timeline.build_and_append_pdu(
|
||||
|
@ -1216,14 +1436,24 @@ impl Service {
|
|||
unsigned: None,
|
||||
state_key: None,
|
||||
redacts: None,
|
||||
timestamp: None,
|
||||
},
|
||||
&conduit_user,
|
||||
conduit_user,
|
||||
&room_id,
|
||||
&state_lock,
|
||||
)?;
|
||||
|
||||
).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Checks whether a given user is an admin of this server
pub fn user_is_admin(&self, user_id: &UserId) -> Result<bool> {
let Some(admin_room) = self.get_admin_room()? else {
return Ok(false);
};

services().rooms.state_cache.is_joined(user_id, &admin_room)
}
}

#[cfg(test)]
@@ -1,8 +1,10 @@
use ruma::api::appservice::Registration;

use crate::Result;

pub trait Data: Send + Sync {
/// Registers an appservice and returns the ID to the caller
fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String>;
fn register_appservice(&self, yaml: Registration) -> Result<String>;

/// Remove an appservice registration
///
@@ -11,9 +13,9 @@ pub trait Data: Send + Sync {
/// * `service_name` - the name you send to register the service previously
fn unregister_appservice(&self, service_name: &str) -> Result<()>;

fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>>;
fn get_registration(&self, id: &str) -> Result<Option<Registration>>;

fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>>;

fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>>;
fn all(&self) -> Result<Vec<(String, Registration)>>;
}
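With the trait now speaking ruma's `Registration` type end to end (matching the `serde_yaml::from_str::<Registration>` call in the admin `register-appservice` handler earlier in this diff), a pasted registration is validated at parse time instead of being stored as an untyped value. A hypothetical round-trip; the example YAML and the `parse_registration` helper are invented, and the exact set of required fields depends on the ruma version and enabled features:

```rust
// Sketch only: parsing an appservice registration into ruma's typed struct.
// Assumes ruma with its appservice API feature enabled, plus serde_yaml.
use ruma::api::appservice::Registration;

fn parse_registration(yaml: &str) -> Result<Registration, serde_yaml::Error> {
    serde_yaml::from_str::<Registration>(yaml)
}

fn main() {
    let yaml = r#"
id: example-bridge
url: http://localhost:5000
as_token: as_secret
hs_token: hs_secret
sender_localpart: bridge
namespaces:
  users: []
  aliases: []
  rooms: []
"#;
    match parse_registration(yaml) {
        Ok(reg) => println!("registered appservice {}", reg.id),
        Err(e) => eprintln!("invalid registration: {e}"),
    }
}
```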
Some files were not shown because too many files have changed in this diff.