Merge branch 'next' into 'room-v11'
# Conflicts:
#   src/service/rooms/timeline/mod.rs
#   src/utils/error.rs

commit b5e21f761b

109 changed files with 4239 additions and 3708 deletions
.editorconfig | 15 (new file)

@@ -0,0 +1,15 @@
# EditorConfig is awesome: https://EditorConfig.org

root = true

[*]
charset = utf-8
end_of_line = lf
tab_width = 4
indent_size = 4
indent_style = space
insert_final_newline = true
max_line_length = 120

[*.nix]
indent_size = 2
.envrc | 4

@@ -1 +1,5 @@
#!/usr/bin/env bash

use flake

PATH_add bin
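The new `.envrc` wires the repo into direnv: `use flake` loads the Nix devShell and `PATH_add bin` exposes the repo's helper scripts. A minimal sketch of first-time setup, assuming direnv and nix-direnv are already installed (the CI below installs exactly these):

```bash
cd conduit      # repo root, where the .envrc lives
direnv allow    # one-time approval; direnv refuses to load an unapproved .envrc

# Entering the directory now activates the devShell and puts ./bin on PATH,
# so the helper scripts added in this commit resolve by name:
command -v nix-build-and-cache
```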
.gitignore | 6 (vendored)

@@ -68,3 +68,9 @@ cached_target

# Direnv cache
/.direnv

# Gitlab CI cache
/.gitlab-ci.d

# mdbook output
public/
.gitlab-ci.yml | 410

@@ -1,244 +1,184 @@
stages:
  - build
  - build docker image
  - test
  - upload artifacts
  - ci
  - artifacts
  - publish

variables:
  # Make GitLab CI go fast:
  GIT_SUBMODULE_STRATEGY: recursive
  FF_USE_FASTZIP: 1
  CACHE_COMPRESSION_LEVEL: fastest

# --------------------------------------------------------------------- #
# Create and publish docker image                                        #
# --------------------------------------------------------------------- #

.docker-shared-settings:
  stage: "build docker image"
  needs: []
  tags: [ "docker" ]
  variables:
    # Docker in Docker:
    DOCKER_BUILDKIT: 1
  image:
    name: docker.io/docker
  services:
    - name: docker.io/docker:dind
      alias: docker
  script:
    - apk add openssh-client
    - eval $(ssh-agent -s)
    - mkdir -p ~/.ssh && chmod 700 ~/.ssh
    - printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config
    - sh .gitlab/setup-buildx-remote-builders.sh
    # Authorize against this project's own image registry:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
    # Build multiplatform image and push to temporary tag:
    - >
      docker buildx build
      --platform "linux/arm/v7,linux/arm64,linux/amd64"
      --pull
      --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
      --push
      --provenance=false
      --file "Dockerfile" .
    # Build multiplatform image to deb stage and extract their .deb files:
    - >
      docker buildx build
      --platform "linux/arm/v7,linux/arm64,linux/amd64"
      --target "packager-result"
      --output="type=local,dest=/tmp/build-output"
      --provenance=false
      --file "Dockerfile" .
    # Build multiplatform image to binary stage and extract their binaries:
    - >
      docker buildx build
      --platform "linux/arm/v7,linux/arm64,linux/amd64"
      --target "builder-result"
      --output="type=local,dest=/tmp/build-output"
      --provenance=false
      --file "Dockerfile" .
    # Copy to GitLab container registry:
    - >
      docker buildx imagetools create
      --tag "$CI_REGISTRY_IMAGE/$TAG"
      --tag "$CI_REGISTRY_IMAGE/$TAG-bullseye"
      --tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
      "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
    # if DockerHub credentials exist, also copy to dockerhub:
    - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
    - >
      if [ -n "${DOCKER_HUB}" ]; then
      docker buildx imagetools create
      --tag "$DOCKER_HUB_IMAGE/$TAG"
      --tag "$DOCKER_HUB_IMAGE/$TAG-bullseye"
      --tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
      "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
      ; fi
    - mv /tmp/build-output ./
  artifacts:
    paths:
      - "./build-output/"

docker:next:
  extends: .docker-shared-settings
  rules:
    - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"'
  variables:
    TAG: "matrix-conduit:next"

docker:master:
  extends: .docker-shared-settings
  rules:
    - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"'
  variables:
    TAG: "matrix-conduit:latest"

docker:tags:
  extends: .docker-shared-settings
  rules:
    - if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG"
  variables:
    TAG: "matrix-conduit:$CI_COMMIT_TAG"


docker build debugging:
  extends: .docker-shared-settings
  rules:
    - if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/"
  variables:
    TAG: "matrix-conduit-docker-tests:latest"

# --------------------------------------------------------------------- #
# Run tests                                                              #
# --------------------------------------------------------------------- #

cargo check:
  stage: test
  image: docker.io/rust:1.70.0-bullseye
  needs: []
  interruptible: true
  before_script:
    - "rustup show && rustc --version && cargo --version" # Print version info for debugging
    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
  script:
    - cargo check


.test-shared-settings:
  stage: "test"
  needs: []
  image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
  tags: ["docker"]
  variables:
    CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
  interruptible: true

test:cargo:
  extends: .test-shared-settings
  before_script:
    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
  script:
    - rustc --version && cargo --version # Print version info for debugging
    - "cargo test --color always --workspace --verbose --locked --no-fail-fast"

test:clippy:
  extends: .test-shared-settings
  allow_failure: true
  before_script:
    - rustup component add clippy
    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
  script:
    - rustc --version && cargo --version # Print version info for debugging
    - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
  artifacts:
    when: always
    reports:
      codequality: gl-code-quality-report.json

test:format:
  extends: .test-shared-settings
  before_script:
    - rustup component add rustfmt
  script:
    - cargo fmt --all -- --check

test:audit:
  extends: .test-shared-settings
  allow_failure: true
  script:
    - cargo audit --color always || true
    - cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json
  artifacts:
    when: always
    reports:
      sast: gl-sast-report.json

test:dockerlint:
  stage: "test"
  needs: []
  image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine
  interruptible: true
  script:
    - hadolint --version
    # First pass: Print for CI log:
    - >
      hadolint
      --no-fail --verbose
      ./Dockerfile
    # Then output the results into a json for GitLab to pretty-print this in the MR:
    - >
      hadolint
      --format gitlab_codeclimate
      --failure-threshold error
      ./Dockerfile > dockerlint.json
  artifacts:
    when: always
    reports:
      codequality: dockerlint.json
    paths:
      - dockerlint.json
  rules:
    - if: '$CI_COMMIT_REF_NAME != "master"'
      changes:
        - docker/*Dockerfile
        - Dockerfile
        - .gitlab-ci.yml
    - if: '$CI_COMMIT_REF_NAME == "master"'
    - if: '$CI_COMMIT_REF_NAME == "next"'

# --------------------------------------------------------------------- #
# Store binaries as package so they have download urls                   #
# --------------------------------------------------------------------- #

# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME:

#publish:package:
#  stage: "upload artifacts"
#  needs:
#    - "docker:tags"
#  rules:
#    - if: "$CI_COMMIT_TAG"
#  image: curlimages/curl:latest
#  tags: ["docker"]
#  variables:
#    GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
#  script:
#    - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"'

  # Makes some things print in color
  TERM: ansi

# Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
workflow:
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
    - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
      when: never
    - if: "$CI_COMMIT_BRANCH"
    - if: "$CI_COMMIT_TAG"
    - if: $CI

before_script:
  # Enable nix-command and flakes
  - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi

  # Add our own binary cache
  - if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi

  # Add alternate binary cache
  - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi

  # Add crane binary cache
  - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi

  # Add nix-community binary cache
  - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi

  # Install direnv and nix-direnv
  - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi

  # Allow .envrc
  - if command -v nix > /dev/null; then direnv allow; fi

  # Set CARGO_HOME to a cacheable path
  - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"

ci:
  stage: ci
  image: nixos/nix:2.20.4
  script:
    # Cache the inputs required for the devShell
    - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation

    - direnv exec . engage
  cache:
    key: nix
    paths:
      - target
      - .gitlab-ci.d
  rules:
    # CI on upstream runners (only available for maintainers)
    - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true"
    # Manual CI on unprotected branches that are not MRs
    - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false"
      when: manual
    # Manual CI on forks
    - if: $IS_UPSTREAM_CI != "true"
      when: manual
    - if: $CI
  interruptible: true

artifacts:
  stage: artifacts
  image: nixos/nix:2.20.4
  script:
    - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
    - cp result/bin/conduit x86_64-unknown-linux-musl

    - mkdir -p target/release
    - cp result/bin/conduit target/release
    - direnv exec . cargo deb --no-build
    - mv target/debian/*.deb x86_64-unknown-linux-musl.deb

    # Since the OCI image package is based on the binary package, this has the
    # fun side effect of uploading the normal binary too. Conduit users who are
    # deploying with Nix can leverage this fact by adding our binary cache to
    # their systems.
    #
    # Note that although we have an `oci-image-x86_64-unknown-linux-musl`
    # output, we don't build it because it would be largely redundant to this
    # one since it's all containerized anyway.
    - ./bin/nix-build-and-cache .#oci-image
    - cp result oci-image-amd64.tar.gz

    - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
    - cp result/bin/conduit aarch64-unknown-linux-musl

    - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
    - cp result oci-image-arm64v8.tar.gz

    - ./bin/nix-build-and-cache .#book
    # We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746
    - cp -r --dereference result public
  artifacts:
    paths:
      - x86_64-unknown-linux-musl
      - aarch64-unknown-linux-musl
      - x86_64-unknown-linux-musl.deb
      - oci-image-amd64.tar.gz
      - oci-image-arm64v8.tar.gz
      - public
  rules:
    # CI required for all MRs
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    # Optional CI on forks
    - if: $IS_UPSTREAM_CI != "true"
      when: manual
      allow_failure: true
    - if: $CI
  interruptible: true

.push-oci-image:
  stage: publish
  image: docker:25.0.0
  services:
    - docker:25.0.0-dind
  variables:
    IMAGE_SUFFIX_AMD64: amd64
    IMAGE_SUFFIX_ARM64V8: arm64v8
  script:
    - docker load -i oci-image-amd64.tar.gz
    - IMAGE_ID_AMD64=$(docker images -q conduit:next)
    - docker load -i oci-image-arm64v8.tar.gz
    - IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
    # Tag and push the architecture specific images
    - docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
    - docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
    - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
    - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
    # Tag the multi-arch image
    - docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
    - docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
    # Tag and push the git ref
    - docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
    - docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
    # Tag git tags as 'latest'
    - |
      if [[ -n "$CI_COMMIT_TAG" ]]; then
        docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
        docker manifest push $IMAGE_NAME:latest
      fi
  dependencies:
    - artifacts
  only:
    - next
    - master
    - tags

oci-image:push-gitlab:
  extends: .push-oci-image
  variables:
    IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
  before_script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY

oci-image:push-dockerhub:
  extends: .push-oci-image
  variables:
    IMAGE_NAME: matrixconduit/matrix-conduit
  before_script:
    - docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD

pages:
  stage: publish
  dependencies:
    - artifacts
  only:
    - next
  script:
    - "true"
  artifacts:
    paths:
      - public
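The `before_script` above configures the project's binary caches for the CI's own Nix installation. A user deploying Conduit with Nix can reuse the same substituter and public key outside CI; a sketch mirroring those commands, assuming a non-NixOS machine where `/etc/nix/nix.conf` is writable as root:

```bash
# Same cache and key the CI trusts in its before_script above.
echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf
echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf
```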
.gitlab/route-map.yml | 3 (new file)

@@ -0,0 +1,3 @@
# Docs: Map markdown to html files
- source: /docs/(.+)\.md/
  public: '\1.html'
Cargo.lock | 1580 (generated)

File diff suppressed because it is too large
Cargo.toml | 52

@@ -1,3 +1,14 @@
# Keep alphabetically sorted
[workspace.lints.rust]
explicit_outlives_requirements = "warn"
unused_qualifications = "warn"

# Keep alphabetically sorted
[workspace.lints.clippy]
cloned_instead_of_copied = "warn"
dbg_macro = "warn"
str_to_string = "warn"

[package]
name = "conduit"
description = "A Matrix homeserver written in Rust"

@@ -9,14 +20,14 @@ readme = "README.md"
version = "0.7.0-alpha"
edition = "2021"

# When changing this, make sure to update the `flake.lock` file by running
# `nix flake update`. If you don't have Nix installed or otherwise don't know
# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in
# the matrix room.
rust-version = "1.70.0"
# See also `rust-toolchain.toml`
rust-version = "1.75.0"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[lints]
workspace = true

[dependencies]
# Web framework
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }

@@ -26,7 +37,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv

# Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "b4853aa8fa5e3a24e3689fc88044de9915f6ab67", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }

@@ -53,7 +64,8 @@ rand = "0.8.5"
# Used to hash passwords
rust-argon2 = "1.0.0"
# Used to send requests
reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
hyper = "0.14.26"
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
# Used for conduit::Error type
thiserror = "1.0.40"
# Used to generate thumbnails for images

@@ -61,13 +73,13 @@ image = { version = "0.24.6", default-features = false, features = ["jpeg", "png
# Used to encode server public key
base64 = "0.21.2"
# Used when hashing the state
ring = "0.16.20"
ring = "0.17.7"
# Used when querying the SRV record of other servers
trust-dns-resolver = "0.22.0"
# Used to find matching events for appservices
regex = "1.8.1"
# jwt jsonwebtokens
jsonwebtoken = "8.3.0"
jsonwebtoken = "9.2.0"
# Performance measurements
tracing = { version = "0.1.37", features = [] }
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }

@@ -78,21 +90,19 @@ tracing-opentelemetry = "0.18.0"
lru-cache = "0.1.2"
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
parking_lot = { version = "0.12.1", optional = true }
crossbeam = { version = "0.8.2", optional = true }
# crossbeam = { version = "0.8.2", optional = true }
num_cpus = "1.15.0"
threadpool = "1.8.1"
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
# Used for ruma wrapper
serde_html_form = "0.2.0"

rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }

thread_local = "1.1.7"
# used for TURN server authentication
hmac = "0.12.1"
sha-1 = "0.10.1"
# used for conduit's CLI and admin room command parsing
clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context", "string"] }
futures-util = { version = "0.3.28", default-features = false }
# Used for reading the configuration from conduit.toml & environment variables
figment = { version = "0.10.8", features = ["env", "toml"] }

@@ -104,15 +114,25 @@ async-trait = "0.1.68"

sd-notify = { version = "0.4.1", optional = true }

[dependencies.rocksdb]
package = "rust-rocksdb"
version = "0.22.7"
optional = true
features = [
    "multi-threaded-cf",
    "zstd",
    "lz4",
]

[target.'cfg(unix)'.dependencies]
nix = { version = "0.26.2", features = ["resource"] }
nix = { version = "0.28", features = ["resource"] }

[features]
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
#backend_sled = ["sled"]
backend_persy = ["persy", "parking_lot"]
backend_sqlite = ["sqlite"]
backend_heed = ["heed", "crossbeam"]
#backend_heed = ["heed", "crossbeam"]
backend_rocksdb = ["rocksdb"]
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
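Because the new `[lints]` table sets `workspace = true`, the package inherits the `[workspace.lints.rust]` and `[workspace.lints.clippy]` tables added at the top of the file. Assuming a standard toolchain, the configured warnings can be surfaced locally with:

```bash
cargo clippy --workspace --all-targets   # reports the lint levels configured above
```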
Dockerfile | 132 (deleted)

@@ -1,132 +0,0 @@
# syntax=docker/dockerfile:1
FROM docker.io/rust:1.70-bullseye AS base

FROM base AS builder
WORKDIR /usr/src/conduit

# Install required packages to build Conduit and it's dependencies
RUN apt-get update && \
    apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5

# == Build dependencies without our own code separately for caching ==
#
# Need a fake main.rs since Cargo refuses to build anything otherwise.
#
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
# request that would allow just dependencies to be compiled, presumably
# regardless of whether source files are available.
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
COPY Cargo.toml Cargo.lock ./
RUN cargo build --release && rm -r src

# Copy over actual Conduit sources
COPY src src

# main.rs and lib.rs need their timestamp updated for this to work correctly since
# otherwise the build with the fake main.rs from above is newer than the
# source files (COPY preserves timestamps).
#
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
RUN touch src/main.rs && touch src/lib.rs && cargo build --release


# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS builder-result
COPY --from=builder /usr/src/conduit/target/release/conduit /conduit


# ---------------------------------------------------------------------------------------------------------------
# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems:
# ---------------------------------------------------------------------------------------------------------------
FROM base AS build-cargo-deb

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    dpkg \
    dpkg-dev \
    liblzma-dev

RUN cargo install cargo-deb
# => binary is in /usr/local/cargo/bin/cargo-deb


# ---------------------------------------------------------------------------------------------------------------
# Package conduit build-result into a .deb package:
# ---------------------------------------------------------------------------------------------------------------
FROM builder AS packager
WORKDIR /usr/src/conduit

COPY ./LICENSE ./LICENSE
COPY ./README.md ./README.md
COPY debian ./debian
COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb

# --no-build makes cargo-deb reuse already compiled project
RUN cargo deb --no-build
# => Package is in /usr/src/conduit/target/debian/<project_name>_<version>_<arch>.deb


# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS packager-result
COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb


# ---------------------------------------------------------------------------------------------------------------
# Stuff below this line actually ends up in the resulting docker image
# ---------------------------------------------------------------------------------------------------------------
FROM docker.io/debian:bullseye-slim AS runner

# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167

ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit

ENV CONDUIT_PORT=6167 \
    CONDUIT_ADDRESS="0.0.0.0" \
    CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
    CONDUIT_CONFIG=''
#   └─> Set no config file to do all configuration with env vars

# Conduit needs:
#   dpkg: to install conduit.deb
#   ca-certificates: for https
#   iproute2 & wget: for the healthcheck script
RUN apt-get update && apt-get -y --no-install-recommends install \
    dpkg \
    ca-certificates \
    iproute2 \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh

# Install conduit.deb:
COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/
RUN dpkg -i /srv/conduit/*.deb

# Improve security: Don't run stuff as root, that does not need to run as root
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
ARG USER_ID=1000
ARG GROUP_ID=1000
RUN set -x ; \
    groupadd -r -g ${GROUP_ID} conduit ; \
    useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1

# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable:
RUN chown -cR conduit:conduit /srv/conduit && \
    chmod +x /srv/conduit/healthcheck.sh && \
    mkdir -p ${DEFAULT_DB_PATH} && \
    chown -cR conduit:conduit ${DEFAULT_DB_PATH}

# Change user to conduit, no root permissions afterwards:
USER conduit
# Set container home directory
WORKDIR /srv/conduit

# Run Conduit and print backtraces on panics
ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/usr/sbin/matrix-conduit" ]
README.md | 60

@@ -1,7 +1,15 @@
# Conduit
### A Matrix homeserver written in Rust

<!-- ANCHOR: catchphrase -->
### A Matrix homeserver written in Rust
<!-- ANCHOR_END: catchphrase -->

Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information.
Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository.

<!-- ANCHOR: body -->
#### What is Matrix?

[Matrix](https://matrix.org) is an open network for secure and decentralized
communication. Users from every Matrix homeserver can chat with users from all
other Matrix servers. You can even use bridges (also called Matrix appservices)

@@ -15,8 +23,7 @@ friends or company.

#### Can I try it out?

Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
example) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information.
Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login.

Server hosting for conduit.rs is donated by the Matrix.org Foundation.

@@ -30,27 +37,25 @@ There are still a few important features missing:

- E2EE emoji comparison over federation (E2EE chat works)
- Outgoing read receipts, typing, presence over federation (incoming works)
<!-- ANCHOR_END: body -->

Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).

#### How can I deploy my own?

- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
- Debian package: [debian/README.md](debian/README.md)
- Nix/NixOS: [nix/README.md](nix/README.md)
- Docker: [docker/README.md](docker/README.md)

If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).

<!-- ANCHOR: footer -->
#### How can I contribute?

1. Look for an issue you would like to work on and make sure it's not assigned
   to other users
2. Ask someone to assign the issue to you (comment on the issue or chat in
   [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org))
3. Fork the repo and work on the issue.[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) is happy to help :)
1. Look for an issue you would like to work on and make sure no one else is currently working on it.
2. Tell us that you are working on the issue (comment on the issue or chat in
   [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions.
3. Fork the repo, create a new branch and push commits.
4. Submit a MR

#### Contact

If you have any questions, feel free to
- Ask in `#conduit:fachschaften.org` on Matrix
- Write an E-Mail to `conduit@koesters.xyz`
- Send an direct message to `@timokoesters:fachschaften.org` on Matrix
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)

#### Thanks to

Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.

@@ -60,20 +65,13 @@ Thanks to the contributors to Conduit and all libraries we use, for example:
- Ruma: A clean library for the Matrix Spec in Rust
- axum: A modular web framework

#### Contact

If you run into any question, feel free to
- Ask us in `#conduit:fachschaften.org` on Matrix
- Write an E-Mail to `conduit@koesters.xyz`
- Send an direct message to `timokoesters@fachschaften.org` on Matrix
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)

#### Donate

Liberapay: <https://liberapay.com/timokoesters/>\
Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
- Liberapay: <https://liberapay.com/timokoesters/>
- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`

#### Logo

Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \
Logo License: https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md
- Lightning Bolt Logo: <https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg>
- Logo License: <https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md>
<!-- ANCHOR_END: footer -->
bin/complement | 37 (new executable file)

@@ -0,0 +1,37 @@
#!/usr/bin/env bash

set -euo pipefail

# Path to Complement's source code
COMPLEMENT_SRC="$1"

# A `.jsonl` file to write test logs to
LOG_FILE="$2"

# A `.jsonl` file to write test results to
RESULTS_FILE="$3"

OCI_IMAGE="complement-conduit:dev"

env \
    -C "$(git rev-parse --show-toplevel)" \
    docker build \
        --tag "$OCI_IMAGE" \
        --file complement/Dockerfile \
        .

# It's okay (likely, even) that `go test` exits nonzero
set +o pipefail
env \
    -C "$COMPLEMENT_SRC" \
    COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
    go test -json ./tests | tee "$LOG_FILE"
set -o pipefail

# Post-process the results into an easy-to-compare format
cat "$LOG_FILE" | jq -c '
    select(
        (.Action == "pass" or .Action == "fail" or .Action == "skip")
        and .Test != null
    ) | {Action: .Action, Test: .Test}
' | sort > "$RESULTS_FILE"
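The three positional arguments are documented by the comments in the script itself. A hypothetical invocation, assuming Complement is checked out next to this repo (the paths are illustrative):

```bash
./bin/complement ../complement complement.log complement-results.jsonl
```

Because the results file is sorted and reduced to `{Action, Test}` pairs, two runs can be compared with a plain `diff`.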
bin/nix-build-and-cache | 26 (new executable file)

@@ -0,0 +1,26 @@
#!/usr/bin/env bash

set -euo pipefail

# The first argument must be the desired installable
INSTALLABLE="$1"

# Build the installable and forward any other arguments too
nix build "$@"

if [ ! -z ${ATTIC_TOKEN+x} ]; then
    nix run --inputs-from . attic -- \
        login \
        conduit \
        "${ATTIC_ENDPOINT:-https://nix.computer.surgery/conduit}" \
        "$ATTIC_TOKEN"

    # Push the target installable and its build dependencies
    nix run --inputs-from . attic -- \
        push \
        conduit \
        "$(nix path-info "$INSTALLABLE" --derivation)" \
        "$(nix path-info "$INSTALLABLE")"
else
    echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
fi
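CI calls this wrapper with a flake installable; when `ATTIC_TOKEN` is unset it degrades to a plain `nix build`. For example, using an installable this commit's CI actually builds:

```bash
./bin/nix-build-and-cache .#oci-image                      # build, and push to the cache if ATTIC_TOKEN is set
./bin/nix-build-and-cache .#oci-image --print-build-logs   # extra flags are forwarded to `nix build`
```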
book.toml | 18 (new file)

@@ -0,0 +1,18 @@
[book]
title = "Conduit"
description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol"
language = "en"
multilingual = false
src = "docs"

[build]
build-dir = "public"
create-missing = true

[output.html]
git-repository-url = "https://gitlab.com/famedly/conduit"
edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}"
git-repository-icon = "fa-git-square"

[output.html.search]
limit-results = 15
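With `src = "docs"` and `build-dir = "public"`, the book renders the new `docs/` tree into `public/`, the directory the `.gitignore` change above starts ignoring. Assuming mdBook is installed, the usual workflow applies:

```bash
mdbook build          # renders docs/ into public/
mdbook serve --open   # live-reloading preview while editing
```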
@@ -1,26 +1,30 @@
# For use in our CI only. This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit
FROM registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:commit-16a08e9b as builder
#FROM rust:latest as builder
FROM rust:1.75.0

WORKDIR /workdir

ARG RUSTC_WRAPPER
ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY
ARG SCCACHE_BUCKET
ARG SCCACHE_ENDPOINT
ARG SCCACHE_S3_USE_SSL
RUN apt-get update && apt-get install -y --no-install-recommends \
    libclang-dev

COPY . .
RUN mkdir -p target/release
RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release

## Actual image
FROM debian:bullseye
WORKDIR /workdir
COPY Cargo.toml Cargo.toml
COPY Cargo.lock Cargo.lock
COPY src src
RUN cargo build --release \
    && mv target/release/conduit conduit \
    && rm -rf target

# Install caddy
RUN apt-get update && apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-testing.list && apt-get update && apt-get install -y caddy
RUN apt-get update \
    && apt-get install -y \
        debian-keyring \
        debian-archive-keyring \
        apt-transport-https \
        curl \
    && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
        | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
    && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
        | tee /etc/apt/sources.list.d/caddy-testing.list \
    && apt-get update \
    && apt-get install -y caddy

COPY conduit-example.toml conduit.toml
COPY complement/caddy.json caddy.json

@@ -29,16 +33,9 @@ ENV SERVER_NAME=localhost
ENV CONDUIT_CONFIG=/workdir/conduit.toml

RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
RUN echo "allow_federation = true" >> conduit.toml
RUN echo "allow_check_for_updates = true" >> conduit.toml
RUN echo "allow_encryption = true" >> conduit.toml
RUN echo "allow_registration = true" >> conduit.toml
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml

COPY --from=builder /workdir/target/release/conduit /workdir/conduit
RUN chmod +x /workdir/conduit

EXPOSE 8008 8448

CMD uname -a && \
@@ -1,13 +1,11 @@
# Running Conduit on Complement
# Complement

This assumes that you're familiar with complement, if not, please readme
[their readme](https://github.com/matrix-org/complement#running).
## What's that?

Complement works with "base images", this directory (and Dockerfile) helps build the conduit complement-ready docker
image.
Have a look at [its repository](https://github.com/matrix-org/complement).

To build, `cd` to the base directory of the workspace, and run this:
## How do I use it with Conduit?

`docker build -t complement-conduit:dev -f complement/Dockerfile .`

Then use `complement-conduit:dev` as a base image for running complement tests.
The script at [`../bin/complement`](../bin/complement) has automation for this.
It takes a few command line arguments, you can read the script to find out what
those are.
@@ -51,7 +51,11 @@ enable_lightning_bolt = true
trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

# Controls the log verbosity. See also [here][0].
#
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
#log = "..."

address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
debian/README.md | 2 (vendored)

@@ -5,7 +5,7 @@ Installation
------------

Information about downloading, building and deploying the Debian package, see
the "Installing Conduit" section in [DEPLOY.md](../DEPLOY.md).
the "Installing Conduit" section in the Deploying docs.
All following sections until "Setting up the Reverse Proxy" be ignored because
this is handled automatically by the packaging.
debian/postinst | 1 (vendored)

@@ -78,7 +78,6 @@ allow_check_for_updates = true
trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
EOF
fi
;;
default.nix | 10 (new file)

@@ -0,0 +1,10 @@
(import
  (
    let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
    fetchTarball {
      url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
      sha256 = lock.nodes.flake-compat.locked.narHash;
    }
  )
  { src = ./.; }
).defaultNix
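This `default.nix` is the standard flake-compat shim: it pins flake-compat via `flake.lock` and re-exports the flake's `defaultNix`, so the project still builds on Nix installations without flake support:

```bash
nix-build    # non-flake build of the default package, via flake-compat
```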
docs/SUMMARY.md | 12 (new file)

@@ -0,0 +1,12 @@
# Summary

- [Introduction](introduction.md)

- [Example configuration](configuration.md)
- [Deploying](deploying.md)
  - [Generic](deploying/generic.md)
  - [Debian](deploying/debian.md)
  - [Docker](deploying/docker.md)
  - [NixOS](deploying/nixos.md)
- [TURN](turn.md)
- [Appservices](appservices.md)
docs/configuration.md | 5 (new file)

@@ -0,0 +1,5 @@
# Example configuration

``` toml
{{#include ../conduit-example.toml}}
```
docs/deploying.md | 3 (new file)

@@ -0,0 +1,3 @@
# Deploying

This chapter describes various ways to deploy Conduit.
docs/deploying/debian.md | 1 (new file)

@@ -0,0 +1 @@
{{#include ../../debian/README.md}}
@@ -32,7 +32,6 @@ services:
      CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
      #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
      #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
      CONDUIT_ADDRESS: 0.0.0.0
      CONDUIT_CONFIG: '' # Ignore this

@@ -33,7 +33,6 @@ services:
      # CONDUIT_PORT: 6167
      # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
      # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
      # CONDUIT_LOG: info # default is: "warn,_=off,sled=off"
      # CONDUIT_ALLOW_JAEGER: 'false'
      # CONDUIT_ALLOW_ENCRYPTION: 'true'
      # CONDUIT_ALLOW_FEDERATION: 'true'

@@ -32,7 +32,6 @@ services:
      CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
      #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
      #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
      CONDUIT_ADDRESS: 0.0.0.0
      CONDUIT_CONFIG: '' # Ignore this
      #
@@ -1,4 +1,4 @@
# Deploy using Docker
# Conduit for Docker

> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.

@@ -64,13 +63,12 @@ docker run -d -p 8448:6167 \
-e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
-e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \
--name conduit <link>
```

or you can use [docker-compose](#docker-compose).

The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../configuration.md).
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.

@@ -88,8 +87,7 @@ Depending on your proxy setup, you can use one of the following files;
When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and
rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want
for your server.

Additional info about deploying Conduit can be found [here](../DEPLOY.md).
Additional info about deploying Conduit can be found [here](generic.md).

### Build

@@ -131,7 +129,7 @@ So...step by step:
1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
   [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename.
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
3. Create the `conduit.toml` config file, an example can be found [here](../configuration.md), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
5. Create the files needed by the `well-known` service.
@ -1,4 +1,4 @@
|
|||
# Deploying Conduit
|
||||
# Generic deployment documentation
|
||||
|
||||
> ## Getting help
|
||||
>
|
||||
|
@ -12,11 +12,13 @@ only offer Linux binaries.
|
|||
|
||||
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url:
|
||||
|
||||
| CPU Architecture | Download stable version | Download development version |
|
||||
| ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- |
|
||||
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | [Binary][x84_64-glibc-next] / [.deb][x84_64-glibc-next-deb] |
|
||||
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] |
|
||||
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] |
|
||||
**Stable versions:**
|
||||
|
||||
| CPU Architecture | Download stable version |
|
||||
| ------------------------------------------- | --------------------------------------------------------------- |
|
||||
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] |
|
||||
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] |
|
||||
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] |
|
||||
|
||||
These builds were created on and linked against the glibc version shipped with Debian bullseye.
|
||||
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.
|
||||
|
@ -24,15 +26,19 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to
|
|||
[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
|
||||
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
|
||||
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
|
||||
[x84_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next
|
||||
[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next
|
||||
[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next
|
||||
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
|
||||
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
|
||||
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
|
||||
[x84_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next
|
||||
[armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next
|
||||
[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next
|
||||
|
||||
**Latest versions:**
|
||||
|
||||
| Target | Type | Download |
|
||||
|-|-|-|
|
||||
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
|
||||
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
|
||||
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
|
||||
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |
|
||||
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=artifacts) |
|
||||
|
||||
```bash
|
||||
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
|
||||
|
@ -53,26 +59,6 @@ Then, `cd` into the source tree of conduit-next and run:
|
|||
$ cargo build --release
|
||||
```
|
||||
|
||||
If you want to cross compile Conduit to another architecture, read the guide below.
|
||||
|
||||
<details>
|
||||
<summary>Cross compilation</summary>
|
||||
|
||||
As easiest way to compile conduit for another platform [cross-rs](https://github.com/cross-rs/cross) is recommended, so install it first.
|
||||
|
||||
In order to use RockDB as storage backend append `-latomic` to linker flags.
|
||||
|
||||
For example, to build a binary for Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as compilation
|
||||
target.
|
||||
|
||||
```bash
|
||||
git clone https://gitlab.com/famedly/conduit.git
|
||||
cd conduit
|
||||
export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc'
|
||||
cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf
|
||||
```
|
||||
</details>
|
||||
|
||||
## Adding a Conduit user
|
||||
|
||||
While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
|
||||
|
@ -133,57 +119,12 @@ $ sudo systemctl daemon-reload
|
|||
|
||||
## Creating the Conduit configuration file
|
||||
|
||||
Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
|
||||
to read it. You need to change at least the server name.**
|
||||
Now we need to create the Conduit's config file in
|
||||
`/etc/matrix-conduit/conduit.toml`. Paste in the contents of
|
||||
[`conduit-example.toml`](../configuration.md) **and take a moment to read it.
|
||||
You need to change at least the server name.**
|
||||
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.
|
||||
|
||||
```toml
[global]
# The server_name is the pretty name of this server. It is used as a suffix for user
# and room ids. Examples: matrix.org, conduit.rs

# The Conduit server needs all /_matrix/ requests to be reachable at
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).

# If that's not possible for you, you can create /.well-known files to redirect
# requests. See
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
# and
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
# for more information

# YOU NEED TO EDIT THIS
#server_name = "your.server.name"

# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/"
database_backend = "rocksdb"

# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
# 443 and 8448 will be forwarded to the Conduit instance running on this port
# Docker users: Don't change this, you'll need to map an external port to this.
port = 6167

# Max size for uploads
max_request_size = 20_000_000 # in bytes

# Enables registration. If set to false, no users can register on this server.
allow_registration = true

allow_federation = true
allow_check_for_updates = true

# Server to get public keys from. You probably shouldn't change this
trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
```
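As a minimal sketch of putting that file in place (the path comes from the guide above; use whatever editor you prefer):

```bash
# Create the config directory and edit the file as root
sudo mkdir -p /etc/matrix-conduit
sudoedit /etc/matrix-conduit/conduit.toml
```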
## Setting the correct file permissions

As we are using a Conduit-specific user, we need to allow it to read the config. To do that, you can run this command on

@@ -273,7 +214,7 @@ server {

client_max_body_size 20M;

location /_matrix/ {
proxy_pass http://127.0.0.1:6167$request_uri;
proxy_pass http://127.0.0.1:6167;
proxy_set_header Host $http_host;
proxy_buffering off;
proxy_read_timeout 5m;
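After changing the reverse proxy configuration, a hedged sanity check with standard nginx tooling (these commands are not part of this diff):

```bash
# Validate the configuration, then reload without dropping connections
sudo nginx -t
sudo systemctl reload nginx
```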
@@ -326,7 +267,7 @@ $ sudo systemctl enable conduit

## How do I know it works?

You can open <https://app.element.io>, enter your homeserver and try to register.
You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your homeserver and try to register. If you are using a registration token, use [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/), as they support this feature.

You can also use these commands as a quick health check.
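For example, based on the curl shown in the hunk context below, a quick check of both the client and federation ports (replace the hostname with your own):

```bash
# Both should return a JSON list of supported Matrix versions
curl https://your.server.name/_matrix/client/versions
curl https://your.server.name:8448/_matrix/client/versions
```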
@@ -344,8 +285,8 @@ $ curl https://your.server.name:8448/_matrix/client/versions

## Audio/Video calls

For Audio/Video call functionality see the [TURN Guide](TURN.md).
For Audio/Video call functionality see the [TURN Guide](../turn.md).

## Appservices

If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md).
If you want to set up an appservice, take a look at the [Appservice Guide](../appservices.md).
18
docs/deploying/nixos.md
Normal file

@@ -0,0 +1,18 @@
# Conduit for NixOS

Conduit can be acquired by Nix from various places:

* The `flake.nix` at the root of the repo
* The `default.nix` at the root of the repo
* From Nixpkgs

The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so (for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to configure Conduit.

If you want to run the latest code, you should get Conduit from the `flake.nix` or `default.nix` and set [`services.matrix-conduit.package`][package] appropriately.

[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
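For a quick test, the flake can also be used directly; this mirrors the `nix run` invocation documented in `nix/README.md` (which this commit removes):

```bash
# Runs Conduit from the repo's flake (flakes must be enabled);
# configuration still has to be provided manually as usual
nix run gitlab:famedly/conduit
```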
13
docs/introduction.md
Normal file

@@ -0,0 +1,13 @@
# Conduit

{{#include ../README.md:catchphrase}}

{{#include ../README.md:body}}

#### How can I deploy my own?

- [Deployment options](deploying.md)

If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md).

{{#include ../README.md:footer}}
74
engage.toml
Normal file

@@ -0,0 +1,74 @@
interpreter = ["bash", "-euo", "pipefail", "-c"]

[[task]]
name = "engage"
group = "versions"
script = "engage --version"

[[task]]
name = "rustc"
group = "versions"
script = "rustc --version"

[[task]]
name = "cargo"
group = "versions"
script = "cargo --version"

[[task]]
name = "cargo-fmt"
group = "versions"
script = "cargo fmt --version"

[[task]]
name = "rustdoc"
group = "versions"
script = "rustdoc --version"

[[task]]
name = "cargo-clippy"
group = "versions"
script = "cargo clippy -- --version"

[[task]]
name = "lychee"
group = "versions"
script = "lychee --version"

[[task]]
name = "cargo-fmt"
group = "lints"
script = "cargo fmt --check -- --color=always"

[[task]]
name = "cargo-doc"
group = "lints"
script = """
RUSTDOCFLAGS="-D warnings" cargo doc \
--workspace \
--no-deps \
--document-private-items \
--color always
"""

[[task]]
name = "cargo-clippy"
group = "lints"
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"

[[task]]
name = "lychee"
group = "lints"
script = "lychee --offline docs"

[[task]]
name = "cargo"
group = "tests"
script = """
cargo test \
--workspace \
--all-targets \
--color=always \
-- \
--color=always
"""
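For local use, a hedged sketch: assuming `engage` is on `$PATH` (the dev shell in `flake.nix` provides it), invoking it runs the task groups defined above:

```bash
# Run the versions, lints, and tests task groups from engage.toml
engage
```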
206
flake.lock
generated

@@ -1,22 +1,41 @@
{
"nodes": {
"crane": {
"attic": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
],
"rust-overlay": "rust-overlay"
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1688772518,
"narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=",
"lastModified": 1707922053,
"narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "6eabc3f02fae3683bffab483e614bebfcd476b21",
"type": "github"
},
"original": {
"owner": "zhaofengli",
"ref": "main",
"repo": "attic",
"type": "github"
}
},
"crane": {
"inputs": {
"nixpkgs": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1702918879,
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
"owner": "ipetkov",
"repo": "crane",
"rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e",
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
"type": "github"
},
"original": {
@@ -25,6 +44,27 @@
"type": "github"
}
},
"crane_2": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1707685877,
"narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=",
"owner": "ipetkov",
"repo": "crane",
"rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
@@ -33,11 +73,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1689488573,
"narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=",
"lastModified": 1709619709,
"narHash": "sha256-l6EPVJfwfelWST7qWQeP6t/TDK3HHv5uUB1b2vw4mOQ=",
"owner": "nix-community",
"repo": "fenix",
"rev": "39096fe3f379036ff4a5fa198950b8e79defe939",
"rev": "c8943ea9e98d41325ff57d4ec14736d330b321b2",
"type": "github"
},
"original": {
@@ -62,16 +102,29 @@
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1689068808,
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
@@ -80,13 +133,78 @@
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1709126324,
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nix-filter": {
"locked": {
"lastModified": 1705332318,
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "nix-filter",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1689444953,
"narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=",
"lastModified": 1702539185,
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "8acef304efe70152463a6399f73e636bcc363813",
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1702780907,
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1709479366,
"narHash": "sha256-n6F0n8UV6lnTZbYPl1A9q1BS0p4hduAv1mGAP17CVd0=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b8697e57f10292a6165a20f03d2f42920dfaf973",
"type": "github"
},
"original": {
@@ -98,20 +216,23 @@
},
"root": {
"inputs": {
"crane": "crane",
"attic": "attic",
"crane": "crane_2",
"fenix": "fenix",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
"flake-compat": "flake-compat_2",
"flake-utils": "flake-utils_2",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_2"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1689441253,
"narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=",
"lastModified": 1709571018,
"narHash": "sha256-ISFrxHxE0J5g7lDAscbK88hwaT5uewvWoma9TlFmRzM=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8",
"rev": "9f14343f9ee24f53f17492c5f9b653427e2ad15e",
"type": "github"
},
"original": {
@@ -121,31 +242,6 @@
"type": "github"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"crane",
"flake-utils"
],
"nixpkgs": [
"crane",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688351637,
"narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "f9b92316727af9e6c7fee4a761242f7f46880329",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
320
flake.nix

@@ -2,92 +2,316 @@
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
nix-filter.url = "github:numtide/nix-filter";
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
};

fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
crane = {
url = "github:ipetkov/crane";
# Pin latest crane that's not affected by the following bugs:
#
# * <https://github.com/ipetkov/crane/issues/527#issuecomment-1978079140>
# * <https://github.com/toml-rs/toml/issues/691>
# * <https://github.com/toml-rs/toml/issues/267>
url = "github:ipetkov/crane?rev=2c653e4478476a52c6aa3ac0495e4dea7449ea0e";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-utils.follows = "flake-utils";
};
attic.url = "github:zhaofengli/attic?ref=main";
};

outputs =
{ self
, nixpkgs
, flake-utils
, nix-filter

, fenix
, crane
, ...
}: flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};

# Use mold on Linux
stdenv = if pkgs.stdenv.isLinux then
pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv
else
pkgs.stdenv;
pkgsHost = nixpkgs.legacyPackages.${system};

# Nix-accessible `Cargo.toml`
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);

# The Rust toolchain to use
toolchain = fenix.packages.${system}.toolchainOf {
# Use the Rust version defined in `Cargo.toml`
channel = cargoToml.package.rust-version;
toolchain = fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;

# THE rust-version HASH
sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc=";
# See also `rust-toolchain.toml`
sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8=";
};

# The system's RocksDB
ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include";
ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
builder = pkgs:
((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;

# Shared between the package and the devShell
nativeBuildInputs = (with pkgs.rustPlatform; [
bindgenHook
]);
nativeBuildInputs = pkgs: [
# bindgen needs the build platform's libclang. Apparently due to
# "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
# quite do the right thing here.
pkgs.pkgsBuildHost.rustPlatform.bindgenHook
];

builder =
((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage;
rocksdb' = pkgs:
let
version = "8.11.3";
in
pkgs.rocksdb.overrideAttrs (old: {
inherit version;
src = pkgs.fetchFromGitHub {
owner = "facebook";
repo = "rocksdb";
rev = "v${version}";
hash = "sha256-OpEiMwGxZuxb9o3RQuSrwZMQGLhe9xLT1aa3HpI4KPs=";
};
});

env = pkgs: {
CONDUIT_VERSION_EXTRA = self.shortRev or self.dirtyShortRev;
ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include";
ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib";
}
// pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = "";
}
// {
CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
lib.concatStringsSep " " ([]
++ lib.optionals
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
stdenv.hostPlatform.isStatic
["-C" "relocation-model=static"]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
["-l" "c"]
++ lib.optionals
# This check has to match the one [here][0]. We only need to set
# these flags when using a different linker. Don't ask me why,
# though, because I don't know. All I know is it breaks otherwise.
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
(
# Nixpkgs doesn't check for x86_64 here but we do, because I
# observed a failure building statically for x86_64 without
# including it here. Linkers are weird.
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
&& stdenv.hostPlatform.isStatic
&& !stdenv.isDarwin
&& !stdenv.cc.bintools.isLLVM
)
[
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}

# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
// (
let
inherit (pkgs.rust.lib) envVars;
in
pkgs.lib.optionalAttrs
(pkgs.stdenv.targetPlatform.rust.rustcTarget
!= pkgs.stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
packages.default = builder {
src = ./.;
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
envVars.linkerForTarget;
}
)
// (
let
inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
// (
let
inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++";
}
));

inherit
stdenv
nativeBuildInputs
ROCKSDB_INCLUDE_DIR
ROCKSDB_LIB_DIR;
package = pkgs: builder pkgs {
src = nix-filter {
root = ./.;
include = [
"src"
"Cargo.toml"
"Cargo.lock"
];
};

devShells.default = (pkgs.mkShell.override { inherit stdenv; }) {
# Rust Analyzer needs to be able to find the path to default crate
# sources, and it can read this environment variable to do so
RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library";
# This is redundant with CI
doCheck = false;

inherit
ROCKSDB_INCLUDE_DIR
ROCKSDB_LIB_DIR;
env = env pkgs;
nativeBuildInputs = nativeBuildInputs pkgs;

meta.mainProgram = cargoToml.package.name;
};

mkOciImage = pkgs: package:
pkgs.dockerTools.buildImage {
name = package.pname;
tag = "next";
copyToRoot = [
pkgs.dockerTools.caCertificates
];
config = {
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
Entrypoint = [
"${pkgs.lib.getExe' pkgs.tini "tini"}"
"--"
];
Cmd = [
"${pkgs.lib.getExe package}"
];
};
};
in
{
packages = {
default = package pkgsHost;
oci-image = mkOciImage pkgsHost self.packages.${system}.default;

book =
let
package = self.packages.${system}.default;
in
pkgsHost.stdenv.mkDerivation {
pname = "${package.pname}-book";
version = package.version;

src = nix-filter {
root = ./.;
include = [
"book.toml"
"conduit-example.toml"
"README.md"
"debian/README.md"
"docs"
];
};

nativeBuildInputs = (with pkgsHost; [
mdbook
]);

buildPhase = ''
mdbook build
mv public $out
'';
};
}
//
builtins.listToAttrs
(builtins.concatLists
(builtins.map
(crossSystem:
let
binaryName = "static-${crossSystem}";
pkgsCrossStatic =
(import nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = package pkgsCrossStatic;
}

# An output for an OCI image based on that binary
{
name = "oci-image-${crossSystem}";
value = mkOciImage
pkgsCrossStatic
self.packages.${system}.${binaryName};
}
]
)
[
"x86_64-unknown-linux-musl"
"aarch64-unknown-linux-musl"
]
)
);

devShells.default = pkgsHost.mkShell {
env = env pkgsHost // {
# Rust Analyzer needs to be able to find the path to default crate
# sources, and it can read this environment variable to do so. The
# `rust-src` component is required in order for this to work.
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
};

# Development tools
nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [
cargo
clippy
rust-src
rustc
rustfmt
]);
};
nativeBuildInputs = nativeBuildInputs pkgsHost ++ [
# Always use nightly rustfmt because most of its options are unstable
#
# This needs to come before `toolchain` in this list, otherwise
# `$PATH` will have stable rustfmt instead.
fenix.packages.${system}.latest.rustfmt

checks = {
packagesDefault = self.packages.${system}.default;
devShellsDefault = self.devShells.${system}.default;
toolchain
] ++ (with pkgsHost; [
engage

# Needed for producing Debian packages
cargo-deb

# Needed for Complement
go
olm

# Needed for our script for Complement
jq

# Needed for finding broken markdown links
lychee

# Useful for editing the book locally
mdbook
]);
};
});
}
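As a usage sketch of the outputs above (the `oci-image` attribute name comes straight from the flake; loading the result into Docker is an assumption about your local setup):

```bash
# Build the OCI image output defined in the flake...
nix build .#oci-image
# ...then load the resulting tarball into the local Docker daemon
docker load < result
```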
198
nix/README.md

@@ -1,198 +0,0 @@
# Conduit for Nix/NixOS

This guide assumes you have a recent version of Nix (^2.4) installed.

Since Conduit ships as a Nix flake, you'll first need to [enable flakes][enable_flakes].

You can now use the usual Nix commands to interact with Conduit's flake. For example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need to provide configuration and such manually as usual).

If your NixOS configuration is defined as a flake, you can depend on this flake to provide a more up-to-date version than provided by `nixpkgs`. In your flake, add the following to your `inputs`:

```nix
conduit = {
url = "gitlab:famedly/conduit";

# Assuming you have an input for nixpkgs called `nixpkgs`. If you experience
# build failures while using this, try commenting/deleting this line. This
# will probably also require you to always build from source.
inputs.nixpkgs.follows = "nixpkgs";
};
```
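Once the input is added, a hedged sketch for pulling in a newer Conduit revision later (flakes CLI; the input name matches the snippet above):

```bash
# Refresh only the `conduit` input recorded in flake.lock
nix flake lock --update-input conduit
```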
Next, make sure you're passing your flake inputs to the `specialArgs` argument of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will assume you've named the group `flake-inputs`.

Now you can configure Conduit and a reverse proxy for it. Add the following to a new Nix file and include it in your configuration:

```nix
{ config
, pkgs
, flake-inputs
, ...
}:

let
# You'll need to edit these values

# The hostname that will appear in your user and room IDs
server_name = "example.com";

# The hostname that Conduit actually runs on
#
# This can be the same as `server_name` if you want. This is only necessary
# when Conduit is running on a different machine than the one hosting your
# root domain. This configuration also assumes this is all running on a single
# machine, some tweaks will need to be made if this is not the case.
matrix_hostname = "matrix.${server_name}";

# An admin email for TLS certificate notifications
admin_email = "admin@${server_name}";

# These ones you can leave alone

# Build a derivation that stores the content of `${server_name}/.well-known/matrix/server`
well_known_server = pkgs.writeText "well-known-matrix-server" ''
{
"m.server": "${matrix_hostname}"
}
'';

# Build a derivation that stores the content of `${server_name}/.well-known/matrix/client`
well_known_client = pkgs.writeText "well-known-matrix-client" ''
{
"m.homeserver": {
"base_url": "https://${matrix_hostname}"
}
}
'';
in

{
# Configure Conduit itself
services.matrix-conduit = {
enable = true;

# This causes NixOS to use the flake defined in this repository instead of
# the build of Conduit built into nixpkgs.
package = flake-inputs.conduit.packages.${pkgs.system}.default;

settings.global = {
inherit server_name;
};
};

# Configure automated TLS acquisition/renewal
security.acme = {
acceptTerms = true;
defaults = {
email = admin_email;
};
};

# ACME data must be readable by the NGINX user
users.users.nginx.extraGroups = [
"acme"
];

# Configure NGINX as a reverse proxy
services.nginx = {
enable = true;
recommendedProxySettings = true;

virtualHosts = {
"${matrix_hostname}" = {
forceSSL = true;
enableACME = true;

listen = [
{
addr = "0.0.0.0";
port = 443;
ssl = true;
}
{
addr = "[::]";
port = 443;
ssl = true;
} {
addr = "0.0.0.0";
port = 8448;
ssl = true;
}
{
addr = "[::]";
port = 8448;
ssl = true;
}
];

locations."/_matrix/" = {
proxyPass = "http://backend_conduit$request_uri";
proxyWebsockets = true;
extraConfig = ''
proxy_set_header Host $host;
proxy_buffering off;
'';
};

extraConfig = ''
merge_slashes off;
'';
};

"${server_name}" = {
forceSSL = true;
enableACME = true;

locations."=/.well-known/matrix/server" = {
# Use the contents of the derivation built previously
alias = "${well_known_server}";

extraConfig = ''
# Set the header since by default NGINX thinks it's just bytes
default_type application/json;
'';
};

locations."=/.well-known/matrix/client" = {
# Use the contents of the derivation built previously
alias = "${well_known_client}";

extraConfig = ''
# Set the header since by default NGINX thinks it's just bytes
default_type application/json;

# https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients
add_header Access-Control-Allow-Origin "*";
'';
};
};
};

upstreams = {
"backend_conduit" = {
servers = {
"[::1]:${toString config.services.matrix-conduit.settings.global.port}" = { };
};
};
};
};

# Open firewall ports for HTTP, HTTPS, and Matrix federation
networking.firewall.allowedTCPPorts = [ 80 443 8448 ];
networking.firewall.allowedUDPPorts = [ 80 443 8448 ];
}
```

Now you can rebuild your system configuration and you should be good to go!

[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes

[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS
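Concretely, that rebuild is something like the following sketch; `myhost` stands in for your `nixosConfigurations` entry and is not named anywhere above:

```bash
# Apply the new system configuration from your flake
sudo nixos-rebuild switch --flake .#myhost
```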
22
rust-toolchain.toml
Normal file

@@ -0,0 +1,22 @@
# This is the authoritative configuration of this project's Rust toolchain.
#
# Other files that need upkeep when this changes:
#
# * `.gitlab-ci.yml`
# * `Cargo.toml`
# * `flake.nix`
#
# Search in those files for `rust-toolchain.toml` to find the relevant places.
# If you're having trouble making the relevant changes, bug a maintainer.

[toolchain]
channel = "1.75.0"
components = [
# For rust-analyzer
"rust-src",
]
targets = [
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"aarch64-unknown-linux-musl",
]
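Since rustup reads `rust-toolchain.toml` automatically from the project root, you can verify what it resolved with a standard rustup command (not part of this diff):

```bash
# Prints the active toolchain, installed components, and targets
rustup show
```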
@@ -1,23 +1,34 @@
use crate::{services, utils, Error, Result};
use bytes::BytesMut;
use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
use ruma::api::{
appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken,
};
use std::{fmt::Debug, mem, time::Duration};
use tracing::warn;

/// Sends a request to an appservice
///
/// Only returns None if there is no url specified in the appservice registration file
#[tracing::instrument(skip(request))]
pub(crate) async fn send_request<T: OutgoingRequest>(
registration: serde_yaml::Value,
registration: Registration,
request: T,
) -> Result<T::IncomingResponse>
) -> Result<Option<T::IncomingResponse>>
where
T: Debug,
{
let destination = registration.get("url").unwrap().as_str().unwrap();
let hs_token = registration.get("hs_token").unwrap().as_str().unwrap();
let destination = match registration.url {
Some(url) => url,
None => {
return Ok(None);
}
};

let hs_token = registration.hs_token.as_str();

let mut http_request = request
.try_into_http_request::<BytesMut>(
destination,
&destination,
SendAccessToken::IfRequired(hs_token),
&[MatrixVersion::V1_0],
)
@@ -39,8 +50,7 @@ where
);
*http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");

let mut reqwest_request = reqwest::Request::try_from(http_request)
.expect("all http requests are valid reqwest requests");
let mut reqwest_request = reqwest::Request::try_from(http_request)?;

*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));

@@ -55,9 +65,7 @@ where
Err(e) => {
warn!(
"Could not send request to appservice {:?} at {}: {}",
registration.get("id"),
destination,
e
registration.id, destination, e
);
return Err(e.into());
}
@@ -95,7 +103,8 @@ where
.body(body)
.expect("reqwest body is valid http body"),
);
response.map_err(|_| {

response.map(Some).map_err(|_| {
warn!(
"Appservice returned invalid response bytes {}\n{}",
destination, url
@@ -3,7 +3,8 @@ use crate::{api::client_server, services, utils, Error, Result, Ruma};
use ruma::{
api::client::{
account::{
change_password, deactivate, get_3pids, get_username_availability, register,
change_password, deactivate, get_3pids, get_username_availability,
register::{self, LoginType},
request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
whoami, ThirdPartyIdRemovalStatus,
},
@@ -84,6 +85,13 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
));
}

if body.body.login_type == Some(LoginType::ApplicationService) && !body.from_appservice {
return Err(Error::BadRequest(
ErrorKind::MissingToken,
"Missing appservice token.",
));
}

let is_guest = body.kind == RegistrationKind::Guest;

let user_id = match (&body.username, is_guest) {
@@ -239,7 +247,14 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe

// If this is the first real user, grant them admin privileges
// Note: the server user, @conduit:servername, is generated first
if services().users.count()? == 2 {
if !is_guest {
if let Some(admin_room) = services().admin.get_admin_room()? {
if services()
.rooms
.state_cache
.room_joined_count(&admin_room)?
== Some(1)
{
services()
.admin
.make_user_admin(&user_id, displayname)
@@ -247,6 +262,8 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe

warn!("Granting {} admin privileges as the first user", user_id);
}
}
}

Ok(register::v3::Response {
access_token: Some(token),
@@ -1,6 +1,5 @@
use crate::{services, Error, Result, Ruma};
use rand::seq::SliceRandom;
use regex::Regex;
use ruma::{
api::{
appservice,
@@ -101,31 +100,20 @@ pub(crate) async fn get_alias_helper(
match services().rooms.alias.resolve_local_alias(&room_alias)? {
Some(r) => room_id = Some(r),
None => {
for (_id, registration) in services().appservice.all()? {
let aliases = registration
.get("namespaces")
.and_then(|ns| ns.get("aliases"))
.and_then(|aliases| aliases.as_sequence())
.map_or_else(Vec::new, |aliases| {
aliases
.iter()
.filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok())
.collect::<Vec<_>>()
});

if aliases
.iter()
.any(|aliases| aliases.is_match(room_alias.as_str()))
&& services()
for appservice in services().appservice.read().await.values() {
if appservice.aliases.is_match(room_alias.as_str())
&& matches!(
services()
.sending
.send_appservice_request(
registration,
appservice.registration.clone(),
appservice::query::query_room_alias::v1::Request {
room_alias: room_alias.clone(),
},
)
.await
.is_ok()
.await,
Ok(Some(_opt_result))
)
{
room_id = Some(
services()
@@ -339,17 +339,19 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(

let mut failures = BTreeMap::new();

let back_off = |id| match services()
let back_off = |id| async {
match services()
.globals
.bad_query_ratelimiter
.write()
.unwrap()
.await
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
}
};

let mut futures: FuturesUnordered<_> = get_over_federation
@@ -359,8 +361,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
.globals
.bad_query_ratelimiter
.read()
.unwrap()
.get(&*server)
.await
.get(server)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
@@ -393,7 +395,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
),
)
.await
.map_err(|e| Error::BadServerResponse("Query took too long")),
.map_err(|_e| Error::BadServerResponse("Query took too long")),
)
})
.collect();
@@ -428,7 +430,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
device_keys.extend(response.device_keys);
}
_ => {
back_off(server.to_owned());
back_off(server.to_owned()).await;

failures.insert(server.to_string(), json!({}));
}
}
@@ -51,7 +51,7 @@ pub async fn create_content_route(
.await?;

Ok(create_content::v3::Response {
content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
content_uri: mxc.into(),
blurhash: None,
})
}
@@ -26,9 +26,10 @@ use ruma::{
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
sync::{Arc, RwLock},
sync::Arc,
time::{Duration, Instant},
};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};

use crate::{
@@ -64,7 +65,12 @@ pub async fn join_room_by_id_route(
.map(|user| user.server_name().to_owned()),
);

servers.push(body.room_id.server_name().to_owned());
servers.push(
body.room_id
.server_name()
.expect("Room IDs should always have a server name")
.into(),
);

join_room_by_id_helper(
body.sender_user.as_deref(),
@@ -105,7 +111,12 @@ pub async fn join_room_by_id_or_alias_route(
.map(|user| user.server_name().to_owned()),
);

servers.push(room_id.server_name().to_owned());
servers.push(
room_id
.server_name()
.expect("Room IDs should always have a server name")
.into(),
);

(servers, room_id)
}
@@ -202,13 +213,16 @@ pub async fn kick_user_route(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(body.room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;

services().rooms.timeline.build_and_append_pdu(
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&event).expect("event is valid, we just created it"),
@@ -219,7 +233,8 @@ pub async fn kick_user_route(
sender_user,
&body.room_id,
&state_lock,
)?;
)
.await?;

drop(state_lock);

@@ -255,6 +270,7 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
serde_json::from_str(event.content.get())
.map(|event: RoomMemberEventContent| RoomMemberEventContent {
membership: MembershipState::Ban,
join_authorized_via_users_server: None,
..event
})
.map_err(|_| Error::bad_database("Invalid member event in database."))
@@ -266,13 +282,16 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(body.room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;

services().rooms.timeline.build_and_append_pdu(
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&event).expect("event is valid, we just created it"),
@@ -283,7 +302,8 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
sender_user,
&body.room_id,
&state_lock,
)?;
)
.await?;

drop(state_lock);

@@ -324,13 +344,16 @@ pub async fn unban_user_route(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(body.room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;

services().rooms.timeline.build_and_append_pdu(
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&event).expect("event is valid, we just created it"),
@@ -341,7 +364,8 @@ pub async fn unban_user_route(
sender_user,
&body.room_id,
&state_lock,
)?;
)
.await?;

drop(state_lock);

@@ -400,7 +424,7 @@ pub async fn get_member_events_route(
if !services()
.rooms
.state_accessor
.user_can_see_state_events(&sender_user, &body.room_id)?
.user_can_see_state_events(sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
@@ -435,7 +459,7 @@ pub async fn joined_members_route(
if !services()
.rooms
.state_accessor
.user_can_see_state_events(&sender_user, &body.room_id)?
.user_can_see_state_events(sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
@@ -479,7 +503,7 @@ async fn join_room_by_id_helper(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.to_owned())
.or_default(),
);
@@ -619,7 +643,7 @@ async fn join_room_by_id_helper(
));
}

if let Ok(signature) = signed_value["signatures"]
match signed_value["signatures"]
.as_object()
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
@@ -630,20 +654,22 @@ async fn join_room_by_id_helper(
ErrorKind::InvalidParam,
"Server did not send its signature",
))
})
{
}) {
Ok(signature) => {
join_event
.get_mut("signatures")
.expect("we created a valid pdu")
.as_object_mut()
.expect("we created a valid pdu")
.insert(remote_server.to_string(), signature.clone());
} else {
}
Err(e) => {
warn!(
"Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}",
"Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}",
);
}
}
}

services().rooms.short.get_or_create_shortroomid(room_id)?;

@@ -668,7 +694,7 @@ async fn join_room_by_id_helper(
.iter()
.map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
{
let (event_id, value) = match result {
let (event_id, value) = match result.await {
Ok(t) => t,
Err(_) => continue,
};
@@ -698,7 +724,7 @@ async fn join_room_by_id_helper(
.iter()
.map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
{
let (event_id, value) = match result {
let (event_id, value) = match result.await {
Ok(t) => t,
Err(_) => continue,
};
@@ -710,7 +736,7 @@ async fn join_room_by_id_helper(
}

info!("Running send_join auth check");
if !state_res::event_auth::auth_check(
let authenticated = state_res::event_auth::auth_check(
&state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
&parsed_join_pdu,
None::<PduEvent>, // TODO: third party invite
@@ -733,7 +759,9 @@ async fn join_room_by_id_helper(
.map_err(|e| {
warn!("Auth check failed: {e}");
Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
})? {
})?;

if !authenticated {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Auth check failed",
@@ -770,12 +798,16 @@ async fn join_room_by_id_helper(
let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?;

info!("Appending new room join event");
services().rooms.timeline.append_pdu(
services()
.rooms
.timeline
.append_pdu(
&parsed_join_pdu,
join_event,
vec![(*parsed_join_pdu.event_id).to_owned()],
&state_lock,
)?;
)
.await?;

info!("Setting final room state for new room");
// We set the room state after inserting the pdu, so that we never have a moment in time
@@ -888,7 +920,10 @@ async fn join_room_by_id_helper(
};

// Try normal join first
let error = match services().rooms.timeline.build_and_append_pdu(
let error = match services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&event).expect("event is valid, we just created it"),
@@ -899,7 +934,9 @@ async fn join_room_by_id_helper(
sender_user,
room_id,
&state_lock,
) {
)
.await
{
Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())),
Err(e) => e,
};
@@ -1095,7 +1132,7 @@ async fn make_join_request(
make_join_response_and_server
}

fn validate_and_add_event_id(
async fn validate_and_add_event_id(
pdu: &RawJsonValue,
room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
@@ -1111,24 +1148,26 @@ fn validate_and_add_event_id(
))
.expect("ruma's reference hashes are valid event ids");

let back_off = |id| match services()
let back_off = |id| async {
match services()
.globals
.bad_event_ratelimiter
.write()
.unwrap()
.await
.entry(id)
{
Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
}
};

if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.await
.get(&event_id)
{
// Exponential backoff
@@ -1143,15 +1182,10 @@ fn validate_and_add_event_id(
}
}

if let Err(e) = ruma::signatures::verify_event(
&*pub_key_map
.read()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?,
&value,
room_version,
) {
if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version)
{
warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
back_off(event_id);
back_off(event_id).await;
return Err(Error::BadServerResponse("Event failed verification."));
}

@@ -1177,7 +1211,7 @@ pub(crate) async fn invite_helper<'a>(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.to_owned())
.or_default(),
);
@@ -1298,13 +1332,16 @@ pub(crate) async fn invite_helper<'a>(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.to_owned())
.or_default(),
);
let state_lock = mutex_state.lock().await;

services().rooms.timeline.build_and_append_pdu(
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
@@ -1325,7 +1362,8 @@ pub(crate) async fn invite_helper<'a>(
sender_user,
room_id,
&state_lock,
)?;
)
.await?;

drop(state_lock);

@@ -1362,7 +1400,7 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> {
pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
// Ask a remote server if we don't have this room
if !services().rooms.metadata.exists(room_id)?
&& room_id.server_name() != services().globals.server_name()
&& room_id.server_name() != Some(services().globals.server_name())
{
if let Err(e) = remote_leave_room(user_id, room_id).await {
warn!("Failed to leave room {} remotely: {}", user_id, e);
@@ -1393,7 +1431,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.to_owned())
.or_default(),
);
@@ -1428,8 +1466,12 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin

event.membership = MembershipState::Leave;
event.reason = reason;
event.join_authorized_via_users_server = None;

services().rooms.timeline.build_and_append_pdu(
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&event).expect("event is valid, we just created it"),
@@ -1440,7 +1482,8 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
user_id,
room_id,
&state_lock,
)?;
)
.await?;
}

Ok(())
@@ -32,7 +32,7 @@ pub async fn send_message_event_route(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(body.room_id.clone())
.or_default(),
);
@@ -73,7 +73,10 @@ pub async fn send_message_event_route(
let mut unsigned = BTreeMap::new();
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());

let event_id = services().rooms.timeline.build_and_append_pdu(
let event_id = services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: body.event_type.to_string().into(),
content: serde_json::from_str(body.body.body.json().get())
@@ -85,7 +88,8 @@ pub async fn send_message_event_route(
sender_user,
&body.room_id,
&state_lock,
)?;
)
.await?;

services().transaction_ids.add_txnid(
sender_user,
@@ -124,14 +128,13 @@ pub async fn get_message_events_route(
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(&t).ok());
.and_then(|t| PduCount::try_from_string(t).ok());

services().rooms.lazy_loading.lazy_load_confirm_delivery(
sender_user,
sender_device,
&body.room_id,
from,
)?;
services()
.rooms
.lazy_loading
.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
.await?;

let limit = u64::from(body.limit).min(100) as usize;
@@ -1,5 +1,8 @@
use crate::{services, utils, Result, Ruma};
use ruma::api::client::presence::{get_presence, set_presence};
use crate::{services, utils, Error, Result, Ruma};
use ruma::api::client::{
error::ErrorKind,
presence::{get_presence, set_presence},
};
use std::time::Duration;

/// # `PUT /_matrix/client/r0/presence/{userId}/status`
@@ -79,6 +82,9 @@ pub async fn get_presence_route(
presence: presence.content.presence,
})
} else {
todo!();
Err(Error::BadRequest(
ErrorKind::NotFound,
"Presence state for this user was not found",
))
}
}
@@ -40,6 +40,7 @@ pub async fn set_displayname_route(
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
displayname: body.displayname.clone(),
join_authorized_via_users_server: None,
..serde_json::from_str(
services()
.rooms
@@ -77,18 +78,17 @@ pub async fn set_displayname_route(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;

let _ = services().rooms.timeline.build_and_append_pdu(
pdu_builder,
sender_user,
&room_id,
&state_lock,
);
let _ = services()
.rooms
.timeline
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
.await;

// Presence update
services().rooms.edus.presence.update_presence(
@@ -175,6 +175,7 @@ pub async fn set_avatar_url_route(
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
avatar_url: body.avatar_url.clone(),
join_authorized_via_users_server: None,
..serde_json::from_str(
services()
.rooms
@@ -212,18 +213,17 @@ pub async fn set_avatar_url_route(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;

let _ = services().rooms.timeline.build_and_append_pdu(
pdu_builder,
sender_user,
&room_id,
&state_lock,
);
let _ = services()
.rooms
.timeline
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
.await;

// Presence update
services().rooms.edus.presence.update_presence(
@@ -24,13 +24,16 @@ pub async fn redact_event_route(
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(body.room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;

let event_id = services().rooms.timeline.build_and_append_pdu(
let event_id = services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomRedaction,
content: to_raw_value(&RoomRedactionEventContent {
@@ -45,7 +48,8 @@ pub async fn redact_event_route(
sender_user,
&body.room_id,
&state_lock,
)?;
)
.await?;

drop(state_lock);
@@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());
 
     // Use limit or else 10, with maximum 100
     let limit = body

@@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());
 
     // Use limit or else 10, with maximum 100
     let limit = body

@@ -121,7 +121,7 @@ pub async fn get_relating_events_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());
 
     // Use limit or else 10, with maximum 100
     let limit = body
@@ -61,7 +61,7 @@ pub async fn create_room_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.clone())
             .or_default(),
     );

@@ -204,7 +204,10 @@ pub async fn create_room_route(
     }
 
     // 1. The room create event
-    services().rooms.timeline.build_and_append_pdu(
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomCreate,
                 content: to_raw_value(&content).expect("event is valid, we just created it"),

@@ -215,10 +218,14 @@ pub async fn create_room_route(
             sender_user,
             &room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     // 2. Let the room creator join
-    services().rooms.timeline.build_and_append_pdu(
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomMember,
                 content: to_raw_value(&RoomMemberEventContent {

@@ -239,7 +246,8 @@ pub async fn create_room_route(
             sender_user,
             &room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     // 3. Power levels
 

@@ -276,7 +284,10 @@ pub async fn create_room_route(
         }
     }
 
-    services().rooms.timeline.build_and_append_pdu(
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomPowerLevels,
                 content: to_raw_value(&power_levels_content)
@@ -288,11 +299,15 @@ pub async fn create_room_route(
             sender_user,
             &room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     // 4. Canonical room alias
     if let Some(room_alias_id) = &alias {
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomCanonicalAlias,
                     content: to_raw_value(&RoomCanonicalAliasEventContent {

@@ -307,13 +322,17 @@ pub async fn create_room_route(
                 sender_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;
     }
 
     // 5. Events set by preset
 
     // 5.1 Join Rules
-    services().rooms.timeline.build_and_append_pdu(
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomJoinRules,
                 content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {

@@ -329,10 +348,14 @@ pub async fn create_room_route(
             sender_user,
             &room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     // 5.2 History Visibility
-    services().rooms.timeline.build_and_append_pdu(
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomHistoryVisibility,
                 content: to_raw_value(&RoomHistoryVisibilityEventContent::new(

@@ -346,10 +369,14 @@ pub async fn create_room_route(
             sender_user,
             &room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     // 5.3 Guest Access
-    services().rooms.timeline.build_and_append_pdu(
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomGuestAccess,
                 content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {

@@ -364,7 +391,8 @@ pub async fn create_room_route(
             sender_user,
             &room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     // 6. Events listed in initial_state
     for event in &body.initial_state {
@@ -383,20 +411,22 @@ pub async fn create_room_route(
             continue;
         }
 
-        services().rooms.timeline.build_and_append_pdu(
-            pdu_builder,
-            sender_user,
-            &room_id,
-            &state_lock,
-        )?;
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
+            .await?;
     }
 
     // 7. Events implied by name and topic
     if let Some(name) = &body.name {
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomName,
-                    content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
+                    content: to_raw_value(&RoomNameEventContent::new(name.clone()))
                         .expect("event is valid, we just created it"),
                     unsigned: None,
                     state_key: Some("".to_owned()),

@@ -405,11 +435,15 @@ pub async fn create_room_route(
                 sender_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;
     }
 
     if let Some(topic) = &body.topic {
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomTopic,
                     content: to_raw_value(&RoomTopicEventContent {

@@ -423,7 +457,8 @@ pub async fn create_room_route(
                 sender_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;
     }
 
     // 8. Events implied by invite (and TODO: invite_3pid)
 
@@ -553,7 +588,7 @@ pub async fn upgrade_room_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(body.room_id.clone())
             .or_default(),
     );

@@ -561,7 +596,10 @@ pub async fn upgrade_room_route(
 
     // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
     // Fail if the sender does not have the required permissions
-    let tombstone_event_id = services().rooms.timeline.build_and_append_pdu(
+    let tombstone_event_id = services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomTombstone,
                 content: to_raw_value(&RoomTombstoneEventContent {

@@ -576,7 +614,8 @@ pub async fn upgrade_room_route(
             sender_user,
             &body.room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     // Change lock to replacement room
     drop(state_lock);

@@ -585,7 +624,7 @@ pub async fn upgrade_room_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(replacement_room.clone())
             .or_default(),
     );

@@ -661,7 +700,10 @@ pub async fn upgrade_room_route(
         ));
     }
 
-    services().rooms.timeline.build_and_append_pdu(
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomCreate,
                 content: to_raw_value(&create_event_content)

@@ -673,10 +715,14 @@ pub async fn upgrade_room_route(
             sender_user,
             &replacement_room,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     // Join the new room
-    services().rooms.timeline.build_and_append_pdu(
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomMember,
                 content: to_raw_value(&RoomMemberEventContent {

@@ -697,7 +743,8 @@ pub async fn upgrade_room_route(
             sender_user,
             &replacement_room,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     // Recommended transferable state events list from the specs
     let transferable_state_events = vec![

@@ -724,7 +771,10 @@ pub async fn upgrade_room_route(
             None => continue, // Skipping missing events.
         };
 
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: event_type.to_string().into(),
                     content: event_content,

@@ -735,7 +785,8 @@ pub async fn upgrade_room_route(
                 sender_user,
                 &replacement_room,
                 &state_lock,
-        )?;
+            )
+            .await?;
     }
 
     // Moves any local aliases to the new room
 

@@ -769,7 +820,10 @@ pub async fn upgrade_room_route(
     power_levels_event_content.invite = new_level;
 
     // Modify the power levels in the old room to prevent sending of events and inviting new users
-    let _ = services().rooms.timeline.build_and_append_pdu(
+    let _ = services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: TimelineEventType::RoomPowerLevels,
                 content: to_raw_value(&power_levels_event_content)

@@ -781,7 +835,8 @@ pub async fn upgrade_room_route(
             sender_user,
             &body.room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     drop(state_lock);
 
@@ -42,24 +42,31 @@ pub async fn get_login_types_route(
 /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
 /// supported login types.
 pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
+    // To allow deprecated login methods
+    #![allow(deprecated)]
     // Validate login method
     // TODO: Other login methods
     let user_id = match &body.login_info {
         login::v3::LoginInfo::Password(login::v3::Password {
             identifier,
             password,
+            user,
+            address: _,
+            medium: _,
         }) => {
-            let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
-                user_id.to_lowercase()
+            let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
+                UserId::parse_with_server_name(
+                    user_id.to_lowercase(),
+                    services().globals.server_name(),
+                )
+            } else if let Some(user) = user {
+                UserId::parse(user)
             } else {
                 warn!("Bad login type: {:?}", &body.login_info);
                 return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
-            };
-            let user_id =
-                UserId::parse_with_server_name(username, services().globals.server_name())
-                    .map_err(|_| {
-                        Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
-                    })?;
+            }
+            .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
 
             let hash = services()
                 .users
                 .password_hash(&user_id)?

@@ -105,24 +112,28 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
                 ));
             }
         }
-        login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { identifier }) => {
+        login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
+            identifier,
+            user,
+        }) => {
             if !body.from_appservice {
                 return Err(Error::BadRequest(
-                    ErrorKind::Forbidden,
-                    "Forbidden login type.",
+                    ErrorKind::MissingToken,
+                    "Missing appservice token.",
                 ));
             };
-            let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
-                user_id.to_lowercase()
+            if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
+                UserId::parse_with_server_name(
+                    user_id.to_lowercase(),
+                    services().globals.server_name(),
+                )
+            } else if let Some(user) = user {
+                UserId::parse(user)
             } else {
                 warn!("Bad login type: {:?}", &body.login_info);
                 return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
-            };
-            let user_id =
-                UserId::parse_with_server_name(username, services().globals.server_name())
-                    .map_err(|_| {
-                        Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
-                    })?;
-            user_id
-        }
+            }
+            .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?
+        }
         _ => {
             warn!("Unsupported or unknown login type: {:?}", &body.login_info);

@@ -163,6 +174,8 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
 
     info!("{} logged in", user_id);
 
+    // Homeservers are still required to send the `home_server` field
+    #[allow(deprecated)]
     Ok(login::v3::Response {
         user_id,
         access_token: token,
@@ -85,7 +85,7 @@ pub async fn get_state_events_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,

@@ -118,7 +118,7 @@ pub async fn get_state_events_for_key_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,

@@ -157,7 +157,7 @@ pub async fn get_state_events_for_empty_key_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,

@@ -227,13 +227,16 @@ async fn send_state_event_for_key_helper(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.to_owned())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;
 
-    let event_id = services().rooms.timeline.build_and_append_pdu(
+    let event_id = services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
             PduBuilder {
                 event_type: event_type.to_string().into(),
                 content: serde_json::from_str(json.json().get()).expect("content is valid json"),

@@ -244,7 +247,8 @@ async fn send_state_event_for_key_helper(
             sender_user,
             room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
 
     Ok(event_id)
 }
@@ -1,6 +1,7 @@
 use crate::{
     service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse,
 };
+
 use ruma::{
     api::client::{
         filter::{FilterDefinition, LazyLoadOptions},

@@ -20,7 +21,7 @@ use ruma::{
         StateEventType, TimelineEventType,
     },
     serde::Raw,
-    uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
+    uint, DeviceId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
 };
 use std::{
     collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},

@@ -28,7 +29,7 @@ use std::{
     time::Duration,
 };
 use tokio::sync::watch::Sender;
-use tracing::error;
+use tracing::{error, info};
 
 /// # `GET /_matrix/client/r0/sync`
 ///

@@ -75,7 +76,7 @@ pub async fn sync_events_route(
         .globals
         .sync_receivers
         .write()
-        .unwrap()
+        .await
         .entry((sender_user.clone(), sender_device.clone()))
     {
         Entry::Vacant(v) => {

@@ -98,6 +99,8 @@ pub async fn sync_events_route(
 
             o.insert((body.since.clone(), rx.clone()));
 
+            info!("Sync started for {sender_user}");
+
             tokio::spawn(sync_helper_wrapper(
                 sender_user.clone(),
                 sender_device.clone(),

@@ -147,7 +150,7 @@ async fn sync_helper_wrapper(
         .globals
         .sync_receivers
         .write()
-        .unwrap()
+        .await
         .entry((sender_user, sender_device))
     {
         Entry::Occupied(o) => {
@@ -302,11 +305,11 @@ async fn sync_helper(
                 .globals
                 .roomid_mutex_insert
                 .write()
-                .unwrap()
+                .await
                 .entry(room_id.clone())
                 .or_default(),
         );
-        let insert_lock = mutex_insert.lock().unwrap();
+        let insert_lock = mutex_insert.lock().await;
         drop(insert_lock);
     }
 

@@ -434,11 +437,11 @@ async fn sync_helper(
                 .globals
                 .roomid_mutex_insert
                 .write()
-                .unwrap()
+                .await
                 .entry(room_id.clone())
                 .or_default(),
         );
-        let insert_lock = mutex_insert.lock().unwrap();
+        let insert_lock = mutex_insert.lock().await;
         drop(insert_lock);
     }
 

@@ -554,6 +557,7 @@ async fn sync_helper(
     }
 }
 
+#[allow(clippy::too_many_arguments)]
 async fn load_joined_room(
     sender_user: &UserId,
     sender_device: &DeviceId,

@@ -576,11 +580,11 @@ async fn load_joined_room(
                 .globals
                 .roomid_mutex_insert
                 .write()
-                .unwrap()
+                .await
                 .entry(room_id.to_owned())
                 .or_default(),
         );
-        let insert_lock = mutex_insert.lock().unwrap();
+        let insert_lock = mutex_insert.lock().await;
         drop(insert_lock);
     }
 

@@ -590,7 +594,7 @@ async fn load_joined_room(
         || services()
             .rooms
             .user
-            .last_notification_read(&sender_user, &room_id)?
+            .last_notification_read(sender_user, room_id)?
             > since;
 
     let mut timeline_users = HashSet::new();
@@ -598,17 +602,16 @@ async fn load_joined_room(
         timeline_users.insert(event.sender.as_str().to_owned());
     }
 
-    services().rooms.lazy_loading.lazy_load_confirm_delivery(
-        &sender_user,
-        &sender_device,
-        &room_id,
-        sincecount,
-    )?;
+    services()
+        .rooms
+        .lazy_loading
+        .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount)
+        .await?;
 
     // Database queries:
 
     let current_shortstatehash =
-        if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
+        if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
             s
         } else {
             error!("Room {} has no state", room_id);

@@ -618,7 +621,7 @@ async fn load_joined_room(
     let since_shortstatehash = services()
         .rooms
         .user
-        .get_token_shortstatehash(&room_id, since)?;
+        .get_token_shortstatehash(room_id, since)?;
 
     let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
         if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {

@@ -630,12 +633,12 @@ async fn load_joined_room(
             let joined_member_count = services()
                 .rooms
                 .state_cache
-                .room_joined_count(&room_id)?
+                .room_joined_count(room_id)?
                 .unwrap_or(0);
             let invited_member_count = services()
                 .rooms
                 .state_cache
-                .room_invited_count(&room_id)?
+                .room_invited_count(room_id)?
                 .unwrap_or(0);
 
             // Recalculate heroes (first 5 members)
 

@@ -648,7 +651,7 @@ async fn load_joined_room(
                 for hero in services()
                     .rooms
                     .timeline
-                    .all_pdus(&sender_user, &room_id)?
+                    .all_pdus(sender_user, room_id)?
                     .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
                     .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
                     .map(|(_, pdu)| {

@@ -669,11 +672,11 @@ async fn load_joined_room(
                         ) && (services()
                             .rooms
                             .state_cache
-                            .is_joined(&user_id, &room_id)?
+                            .is_joined(&user_id, room_id)?
                             || services()
                                 .rooms
                                 .state_cache
-                                .is_invited(&user_id, &room_id)?)
+                                .is_invited(&user_id, room_id)?)
                         {
                             Ok::<_, Error>(Some(state_key.clone()))
                         } else {
@@ -789,20 +792,24 @@ async fn load_joined_room(
 
             // Reset lazy loading because this is an initial sync
             services().rooms.lazy_loading.lazy_load_reset(
-                &sender_user,
-                &sender_device,
-                &room_id,
+                sender_user,
+                sender_device,
+                room_id,
             )?;
 
             // The state_events above should contain all timeline_users, let's mark them as lazy
             // loaded.
-            services().rooms.lazy_loading.lazy_load_mark_sent(
-                &sender_user,
-                &sender_device,
-                &room_id,
+            services()
+                .rooms
+                .lazy_loading
+                .lazy_load_mark_sent(
+                    sender_user,
+                    sender_device,
+                    room_id,
                     lazy_loaded,
                     next_batchcount,
-            );
+                )
+                .await;
 
             (
                 heroes,

@@ -866,14 +873,14 @@ async fn load_joined_room(
                 }
 
                 if !services().rooms.lazy_loading.lazy_load_was_sent_before(
-                    &sender_user,
-                    &sender_device,
-                    &room_id,
+                    sender_user,
+                    sender_device,
+                    room_id,
                     &event.sender,
                 )? || lazy_load_send_redundant
                 {
                     if let Some(member_event) = services().rooms.state_accessor.room_state_get(
-                        &room_id,
+                        room_id,
                         &StateEventType::RoomMember,
                         event.sender.as_str(),
                     )? {

@@ -883,13 +890,17 @@ async fn load_joined_room(
                 }
             }
 
-            services().rooms.lazy_loading.lazy_load_mark_sent(
-                &sender_user,
-                &sender_device,
-                &room_id,
+            services()
+                .rooms
+                .lazy_loading
+                .lazy_load_mark_sent(
+                    sender_user,
+                    sender_device,
+                    room_id,
                     lazy_loaded,
                     next_batchcount,
-            );
+                )
+                .await;
 
             let encrypted_room = services()
                 .rooms
@@ -934,7 +945,7 @@ async fn load_joined_room(
                     match new_membership {
                         MembershipState::Join => {
                             // A new user joined an encrypted room
-                            if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
+                            if !share_encrypted_room(sender_user, &user_id, room_id)? {
                                 device_list_updates.insert(user_id);
                             }
                         }

@@ -954,15 +965,15 @@ async fn load_joined_room(
                 services()
                     .rooms
                     .state_cache
-                    .room_members(&room_id)
+                    .room_members(room_id)
                     .flatten()
                     .filter(|user_id| {
                         // Don't send key updates from the sender to the sender
-                        &sender_user != user_id
+                        sender_user != user_id
                     })
                     .filter(|user_id| {
                         // Only send keys if the sender doesn't share an encrypted room with the target already
-                        !share_encrypted_room(&sender_user, user_id, &room_id)
+                        !share_encrypted_room(sender_user, user_id, room_id)
                             .unwrap_or(false)
                     }),
             );

@@ -997,7 +1008,7 @@ async fn load_joined_room(
             services()
                 .rooms
                 .user
-                .notification_count(&sender_user, &room_id)?
+                .notification_count(sender_user, room_id)?
                 .try_into()
                 .expect("notification count can't go that high"),
         )

@@ -1010,7 +1021,7 @@ async fn load_joined_room(
             services()
                 .rooms
                 .user
-                .highlight_count(&sender_user, &room_id)?
+                .highlight_count(sender_user, room_id)?
                 .try_into()
                 .expect("highlight count can't go that high"),
         )

@@ -1039,15 +1050,22 @@ async fn load_joined_room(
         .rooms
         .edus
         .read_receipt
-        .readreceipts_since(&room_id, since)
+        .readreceipts_since(room_id, since)
        .filter_map(|r| r.ok()) // Filter out buggy events
        .map(|(_, _, v)| v)
        .collect();
 
-    if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
+    if services()
+        .rooms
+        .edus
+        .typing
+        .last_typing_update(room_id)
+        .await?
+        > since
+    {
         edus.push(
             serde_json::from_str(
-                &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?)
+                &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id).await?)
                     .expect("event is valid, we just created it"),
             )
             .expect("event is valid, we just created it"),

@@ -1056,7 +1074,7 @@ async fn load_joined_room(
 
     // Save the state after this sync so we can send the correct state diff next sync
     services().rooms.user.associate_token_shortstatehash(
-        &room_id,
+        room_id,
         next_batch,
         current_shortstatehash,
     )?;

@@ -1065,7 +1083,7 @@ async fn load_joined_room(
         account_data: RoomAccountData {
             events: services()
                 .account_data
-                .changes_since(Some(&room_id), &sender_user, since)?
+                .changes_since(Some(room_id), sender_user, since)?
                 .into_iter()
                 .filter_map(|(_, v)| {
                     serde_json::from_str(v.json().get())

@@ -1110,13 +1128,13 @@ fn load_timeline(
     if services()
         .rooms
         .timeline
-        .last_timeline_count(&sender_user, &room_id)?
+        .last_timeline_count(sender_user, room_id)?
         > roomsincecount
     {
         let mut non_timeline_pdus = services()
             .rooms
             .timeline
-            .pdus_until(&sender_user, &room_id, PduCount::max())?
+            .pdus_until(sender_user, room_id, PduCount::max())?
             .filter_map(|r| {
                 // Filter out buggy events
                 if r.is_err() {
@@ -1172,7 +1190,6 @@ fn share_encrypted_room(
 pub async fn sync_events_v4_route(
     body: Ruma<sync_events::v4::Request>,
 ) -> Result<sync_events::v4::Response, RumaResponse<UiaaResponse>> {
-    dbg!(&body.body);
     let sender_user = body.sender_user.expect("user is authenticated");
     let sender_device = body.sender_device.expect("user is authenticated");
     let mut body = body.body;

@@ -1232,7 +1249,7 @@ pub async fn sync_events_v4_route(
 
     for room_id in &all_joined_rooms {
         let current_shortstatehash =
-            if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
+            if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
                 s
             } else {
                 error!("Room {} has no state", room_id);

@@ -1242,7 +1259,7 @@ pub async fn sync_events_v4_route(
         let since_shortstatehash = services()
             .rooms
             .user
-            .get_token_shortstatehash(&room_id, globalsince)?;
+            .get_token_shortstatehash(room_id, globalsince)?;
 
         let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
             .and_then(|shortstatehash| {

@@ -1331,7 +1348,7 @@ pub async fn sync_events_v4_route(
                                 if !share_encrypted_room(
                                     &sender_user,
                                     &user_id,
-                                    &room_id,
+                                    room_id,
                                 )? {
                                     device_list_changes.insert(user_id);
                                 }

@@ -1352,7 +1369,7 @@ pub async fn sync_events_v4_route(
                     services()
                         .rooms
                         .state_cache
-                        .room_members(&room_id)
+                        .room_members(room_id)
                         .flatten()
                         .filter(|user_id| {
                             // Don't send key updates from the sender to the sender

@@ -1360,7 +1377,7 @@ pub async fn sync_events_v4_route(
                         })
                         .filter(|user_id| {
                             // Only send keys if the sender doesn't share an encrypted room with the target already
-                            !share_encrypted_room(&sender_user, user_id, &room_id)
+                            !share_encrypted_room(&sender_user, user_id, room_id)
                                 .unwrap_or(false)
                         }),
                 );

@@ -1451,7 +1468,7 @@ pub async fn sync_events_v4_route(
             }
             sync_events::v4::SyncOp {
                 op: SlidingOp::Sync,
-                range: Some(r.clone()),
+                range: Some(r),
                 index: None,
                 room_ids,
                 room_id: None,

@@ -1476,6 +1493,9 @@ pub async fn sync_events_v4_route(
 
     let mut known_subscription_rooms = BTreeSet::new();
     for (room_id, room) in &body.room_subscriptions {
+        if !services().rooms.metadata.exists(room_id)? {
+            continue;
+        }
         let todo_room = todo_rooms
             .entry(room_id.clone())
             .or_insert((BTreeSet::new(), 0, u64::MAX));

@@ -1523,7 +1543,7 @@ pub async fn sync_events_v4_route(
         let roomsincecount = PduCount::Normal(*roomsince);
 
         let (timeline_pdus, limited) =
-            load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?;
+            load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?;
 
         if roomsince != &0 && timeline_pdus.is_empty() {
             continue;
@@ -1555,30 +1575,31 @@ pub async fn sync_events_v4_route(
 
         let required_state = required_state_request
             .iter()
-            .map(|state| {
+            .flat_map(|state| {
                 services()
                     .rooms
                     .state_accessor
-                    .room_state_get(&room_id, &state.0, &state.1)
-            })
-            .filter_map(|r| r.ok())
-            .filter_map(|o| o)
-            .map(|state| state.to_sync_state_event())
+                    .room_state_get(room_id, &state.0, &state.1)
+                    .ok()
+                    .flatten()
+                    .map(|state| state.to_sync_state_event())
+            })
             .collect();
 
         // Heroes
         let heroes = services()
             .rooms
             .state_cache
-            .room_members(&room_id)
+            .room_members(room_id)
             .filter_map(|r| r.ok())
             .filter(|member| member != &sender_user)
-            .map(|member| {
-                Ok::<_, Error>(
+            .flat_map(|member| {
                 services()
                     .rooms
                     .state_accessor
-                    .get_member(&room_id, &member)?
+                    .get_member(room_id, &member)
+                    .ok()
+                    .flatten()
                     .map(|memberevent| {
                         (
                             memberevent

@@ -1586,32 +1607,26 @@ pub async fn sync_events_v4_route(
                                 .unwrap_or_else(|| member.to_string()),
                             memberevent.avatar_url,
                         )
-                    }),
-                )
-            })
-            .filter_map(|r| r.ok())
-            .filter_map(|o| o)
+                    })
+            })
             .take(5)
             .collect::<Vec<_>>();
-        let name = if heroes.len() > 1 {
-            let last = heroes[0].0.clone();
-            Some(
-                heroes[1..]
+        let name = match &heroes[..] {
+            [] => None,
+            [only] => Some(only.0.clone()),
+            [firsts @ .., last] => Some(
+                firsts
                     .iter()
                     .map(|h| h.0.clone())
                     .collect::<Vec<_>>()
                     .join(", ")
                     + " and "
-                    + &last,
-            )
-        } else if heroes.len() == 1 {
-            Some(heroes[0].0.clone())
-        } else {
-            None
+                    + &last.0,
+            ),
         };
 
-        let avatar = if heroes.len() == 1 {
-            heroes[0].1.clone()
+        let avatar = if let [only] = &heroes[..] {
+            only.1.clone()
         } else {
             None
         };
@@ -1619,16 +1634,16 @@ pub async fn sync_events_v4_route(
         rooms.insert(
             room_id.clone(),
             sync_events::v4::SlidingSyncRoom {
-                name: services()
-                    .rooms
-                    .state_accessor
-                    .get_name(&room_id)?
-                    .or_else(|| name),
-                avatar: services()
-                    .rooms
-                    .state_accessor
-                    .get_avatar(&room_id)?
-                    .map_or(avatar, |a| a.url),
+                name: services().rooms.state_accessor.get_name(room_id)?.or(name),
+                avatar: if let Some(avatar) = avatar {
+                    JsOption::Some(avatar)
+                } else {
+                    match services().rooms.state_accessor.get_avatar(room_id)? {
+                        JsOption::Some(avatar) => JsOption::from_option(avatar.url),
+                        JsOption::Null => JsOption::Null,
+                        JsOption::Undefined => JsOption::Undefined,
+                    }
+                },
                 initial: Some(roomsince == &0),
                 is_dm: None,
                 invite_state: None,

@@ -1637,7 +1652,7 @@ pub async fn sync_events_v4_route(
                     services()
                         .rooms
                         .user
-                        .highlight_count(&sender_user, &room_id)?
+                        .highlight_count(&sender_user, room_id)?
                         .try_into()
                         .expect("notification count can't go that high"),
                 ),

@@ -1645,7 +1660,7 @@ pub async fn sync_events_v4_route(
                     services()
                         .rooms
                         .user
-                        .notification_count(&sender_user, &room_id)?
+                        .notification_count(&sender_user, room_id)?
                         .try_into()
                         .expect("notification count can't go that high"),
                 ),

@@ -1658,7 +1673,7 @@ pub async fn sync_events_v4_route(
                     (services()
                         .rooms
                         .state_cache
-                        .room_joined_count(&room_id)?
+                        .room_joined_count(room_id)?
                         .unwrap_or(0) as u32)
                         .into(),
                 ),

@@ -1666,7 +1681,7 @@ pub async fn sync_events_v4_route(
                     (services()
                         .rooms
                         .state_cache
-                        .room_invited_count(&room_id)?
+                        .room_invited_count(room_id)?
                         .unwrap_or(0) as u32)
                         .into(),
                 ),

@@ -1689,7 +1704,7 @@ pub async fn sync_events_v4_route(
         let _ = tokio::time::timeout(duration, watcher).await;
     }
 
-    Ok(dbg!(sync_events::v4::Response {
+    Ok(sync_events::v4::Response {
         initial: globalsince == 0,
         txn_id: body.txn_id.clone(),
         pos: next_batch.to_string(),

@@ -1744,5 +1759,5 @@ pub async fn sync_events_v4_route(
             },
         },
         delta_token: None,
-    }))
+    })
 }
@@ -23,17 +23,23 @@ pub async fn create_typing_event_route(
     }
 
     if let Typing::Yes(duration) = body.state {
-        services().rooms.edus.typing.typing_add(
+        services()
+            .rooms
+            .edus
+            .typing
+            .typing_add(
                 sender_user,
                 &body.room_id,
                 duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
-        )?;
+            )
+            .await?;
     } else {
         services()
             .rooms
             .edus
             .typing
-            .typing_remove(sender_user, &body.room_id)?;
+            .typing_remove(sender_user, &body.room_id)
+            .await?;
     }
 
     Ok(create_typing_event::v3::Response {})
@@ -26,6 +26,7 @@ pub async fn get_supported_versions_route(
             "v1.2".to_owned(),
             "v1.3".to_owned(),
             "v1.4".to_owned(),
+            "v1.5".to_owned(),
         ],
         unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
     };
@@ -48,6 +48,9 @@ pub async fn search_users_route(
                 return None;
             }
 
+            // It's a matching user, but is the sender allowed to see them?
+            let mut user_visible = false;
+
             let user_is_in_public_rooms = services()
                 .rooms
                 .state_cache

@@ -69,9 +72,8 @@ pub async fn search_users_route(
                 });
 
             if user_is_in_public_rooms {
-                return Some(user);
-            }
-
+                user_visible = true;
+            } else {
                 let user_is_in_shared_rooms = services()
                     .rooms
                     .user

@@ -81,10 +83,15 @@ pub async fn search_users_route(
                     .is_some();
 
                 if user_is_in_shared_rooms {
-                    return Some(user);
+                    user_visible = true;
                 }
             }
 
-            None
+            if !user_visible {
+                return None;
+            }
+
+            Some(user)
         });
 
     let results = users.by_ref().take(limit).collect();
@@ -15,13 +15,20 @@ use bytes::{Buf, BufMut, Bytes, BytesMut};
 use http::{Request, StatusCode};
 use ruma::{
     api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
-    CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId,
+    CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
 };
 use serde::Deserialize;
 use tracing::{debug, error, warn};
 
 use super::{Ruma, RumaResponse};
-use crate::{services, Error, Result};
+use crate::{service::appservice::RegistrationInfo, services, Error, Result};
+
+enum Token {
+    Appservice(Box<RegistrationInfo>),
+    User((OwnedUserId, OwnedDeviceId)),
+    Invalid,
+    None,
+}
 
 #[async_trait]
 impl<T, S, B> FromRequest<S, B> for Ruma<T>
@@ -78,36 +85,50 @@ where
             None => query_params.access_token.as_deref(),
         };
 
+        let token = if let Some(token) = token {
+            if let Some(reg_info) = services().appservice.find_from_token(token).await {
+                Token::Appservice(Box::new(reg_info.clone()))
+            } else if let Some((user_id, device_id)) = services().users.find_from_token(token)? {
+                Token::User((user_id, OwnedDeviceId::from(device_id)))
+            } else {
+                Token::Invalid
+            }
+        } else {
+            Token::None
+        };
+
         let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
 
-        let appservices = services().appservice.all().unwrap();
-        let appservice_registration = appservices.iter().find(|(_id, registration)| {
-            registration
-                .get("as_token")
-                .and_then(|as_token| as_token.as_str())
-                .map_or(false, |as_token| token == Some(as_token))
-        });
-
         let (sender_user, sender_device, sender_servername, from_appservice) =
-            if let Some((_id, registration)) = appservice_registration {
-                match metadata.authentication {
-                    AuthScheme::AccessToken => {
-                        let user_id = query_params.user_id.map_or_else(
+            match (metadata.authentication, token) {
+                (_, Token::Invalid) => {
+                    return Err(Error::BadRequest(
+                        ErrorKind::UnknownToken { soft_logout: false },
+                        "Unknown access token.",
+                    ))
+                }
+                (
+                    AuthScheme::AccessToken
+                    | AuthScheme::AppserviceToken
+                    | AuthScheme::AccessTokenOptional
+                    | AuthScheme::None,
+                    Token::Appservice(info),
+                ) => {
+                    let user_id = query_params
+                        .user_id
+                        .map_or_else(
                             || {
                                 UserId::parse_with_server_name(
-                                    registration
-                                        .get("sender_localpart")
-                                        .unwrap()
-                                        .as_str()
-                                        .unwrap(),
+                                    info.registration.sender_localpart.as_str(),
                                     services().globals.server_name(),
                                 )
-                                .unwrap()
                             },
-                            |s| UserId::parse(s).unwrap(),
-                        );
-
-                        if !services().users.exists(&user_id).unwrap() {
+                            UserId::parse,
+                        )
+                        .map_err(|_| {
+                            Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
+                        })?;
+                    if !services().users.exists(&user_id)? {
                         return Err(Error::BadRequest(
                             ErrorKind::Forbidden,
                             "User does not exist.",
@@ -117,38 +138,21 @@ where
                     // TODO: Check if appservice is allowed to be that user
                     (Some(user_id), None, None, true)
                 }
-                AuthScheme::ServerSignatures => (None, None, None, true),
-                AuthScheme::None => (None, None, None, true),
-                }
-            } else {
-                match metadata.authentication {
-                    AuthScheme::AccessToken => {
-                        let token = match token {
-                            Some(token) => token,
-                            _ => {
+                (AuthScheme::AccessToken, Token::None) => {
                     return Err(Error::BadRequest(
                         ErrorKind::MissingToken,
                         "Missing access token.",
-                                ))
+                    ));
                 }
+                (
+                    AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None,
+                    Token::User((user_id, device_id)),
+                ) => (Some(user_id), Some(device_id), None, false),
+                (AuthScheme::ServerSignatures, Token::None) => {
+                    if !services().globals.allow_federation() {
+                        return Err(Error::bad_config("Federation is disabled."));
+                    }
-                        };
-
-                        match services().users.find_from_token(token).unwrap() {
-                            None => {
-                                return Err(Error::BadRequest(
-                                    ErrorKind::UnknownToken { soft_logout: false },
-                                    "Unknown access token.",
-                                ))
-                            }
-                            Some((user_id, device_id)) => (
-                                Some(user_id),
-                                Some(OwnedDeviceId::from(device_id)),
-                                None,
-                                false,
-                            ),
-                        }
-                    }
-                    AuthScheme::ServerSignatures => {
                     let TypedHeader(Authorization(x_matrix)) = parts
                         .extract::<TypedHeader<Authorization<XMatrix>>>()
                         .await
@@ -250,7 +254,23 @@ where
                     }
                 }
             }
-            AuthScheme::None => (None, None, None, false),
+            (
+                AuthScheme::None
+                | AuthScheme::AppserviceToken
+                | AuthScheme::AccessTokenOptional,
+                Token::None,
+            ) => (None, None, None, false),
+            (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => {
+                return Err(Error::BadRequest(
+                    ErrorKind::Unauthorized,
+                    "Only server signatures should be used on this endpoint.",
+                ));
+            }
+            (AuthScheme::AppserviceToken, Token::User(_)) => {
+                return Err(Error::BadRequest(
+                    ErrorKind::Unauthorized,
+                    "Only appservice access tokens should be used on this endpoint.",
+                ));
+            }
         };
 
@@ -51,11 +51,12 @@ use std::{
     fmt::Debug,
     mem,
     net::{IpAddr, SocketAddr},
-    sync::{Arc, RwLock},
+    sync::Arc,
     time::{Duration, Instant, SystemTime},
 };
+use tokio::sync::RwLock;
 
-use tracing::{debug, error, trace, warn};
+use tracing::{debug, error, warn};
 
 /// Wraps either an literal IP address plus port, or a hostname plus complement
 /// (colon-plus-port if it was specified).

@@ -137,7 +138,7 @@ where
         .globals
         .actual_destination_cache
         .read()
-        .unwrap()
+        .await
         .get(destination)
         .cloned();
 

@@ -232,8 +233,7 @@ where
         }
     }
 
-    let reqwest_request = reqwest::Request::try_from(http_request)
-        .expect("all http requests are valid reqwest requests");
+    let reqwest_request = reqwest::Request::try_from(http_request)?;
 
     let url = reqwest_request.url().clone();
 

@@ -290,7 +290,7 @@ where
             .globals
             .actual_destination_cache
             .write()
-            .unwrap()
+            .await
             .insert(
                 OwnedServerName::from(destination),
                 (actual_destination, host),

@@ -341,7 +341,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest {
 }
 
 /// Returns: actual_destination, host header
-/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names
+/// Implemented according to the specification at <https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names>
 /// Numbers in comments below refer to bullet points in linked section of specification
 async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
     debug!("Finding actual destination for {destination}");
@@ -475,12 +475,11 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
     (actual_destination, hostname)
 }
 
-async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
-    let hostname = hostname.trim_end_matches('.');
-    if let Ok(Some(host_port)) = services()
+async fn query_given_srv_record(record: &str) -> Option<FedDest> {
+    services()
         .globals
         .dns_resolver()
-        .srv_lookup(format!("_matrix._tcp.{hostname}."))
+        .srv_lookup(record)
         .await
         .map(|srv| {
             srv.iter().next().map(|result| {

@@ -490,10 +489,17 @@ async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
                 )
             })
         })
         .unwrap_or(None)
+}
+
+async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
+    let hostname = hostname.trim_end_matches('.');
+
+    if let Some(host_port) = query_given_srv_record(&format!("_matrix-fed._tcp.{hostname}.")).await
+    {
         Some(host_port)
     } else {
-        None
+        query_given_srv_record(&format!("_matrix._tcp.{hostname}.")).await
     }
 }
 
@@ -600,10 +606,6 @@ pub async fn get_server_keys_deprecated_route() -> impl IntoResponse {
 pub async fn get_public_rooms_filtered_route(
     body: Ruma<get_public_rooms_filtered::v1::Request>,
 ) -> Result<get_public_rooms_filtered::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let response = client_server::get_public_rooms_filtered_helper(
         None,
         body.limit,

@@ -627,10 +629,6 @@ pub async fn get_public_rooms_filtered_route(
 pub async fn get_public_rooms_route(
     body: Ruma<get_public_rooms::v1::Request>,
 ) -> Result<get_public_rooms::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let response = client_server::get_public_rooms_filtered_helper(
         None,
         body.limit,

@@ -666,7 +664,7 @@ pub fn parse_incoming_pdu(
 
     let room_version_id = services().rooms.state.get_room_version(&room_id)?;
 
-    let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) {
+    let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) {
         Ok(t) => t,
         Err(_) => {
             // Event could not be converted to canonical json

@@ -685,10 +683,6 @@ pub fn parse_incoming_pdu(
 pub async fn send_transaction_message_route(
     body: Ruma<send_transaction_message::v1::Request>,
 ) -> Result<send_transaction_message::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let sender_servername = body
         .sender_servername
         .as_ref()

@@ -724,7 +718,7 @@ pub async fn send_transaction_message_route(
             continue;
         }
 
-        let r = parse_incoming_pdu(&pdu);
+        let r = parse_incoming_pdu(pdu);
         let (event_id, value, room_id) = match r {
             Ok(t) => t,
             Err(e) => {

@@ -740,7 +734,7 @@ pub async fn send_transaction_message_route(
             .globals
             .roomid_mutex_federation
             .write()
-            .unwrap()
+            .await
             .entry(room_id.to_owned())
             .or_default(),
         );
@@ -837,17 +831,23 @@ pub async fn send_transaction_message_route(
                     .is_joined(&typing.user_id, &typing.room_id)?
                 {
                     if typing.typing {
-                        services().rooms.edus.typing.typing_add(
+                        services()
+                            .rooms
+                            .edus
+                            .typing
+                            .typing_add(
                                 &typing.user_id,
                                 &typing.room_id,
                                 3000 + utils::millis_since_unix_epoch(),
-                        )?;
+                            )
+                            .await?;
                     } else {
                         services()
                             .rooms
                             .edus
                             .typing
-                            .typing_remove(&typing.user_id, &typing.room_id)?;
+                            .typing_remove(&typing.user_id, &typing.room_id)
+                            .await?;
                     }
                 }
             }

@@ -953,10 +953,6 @@ pub async fn send_transaction_message_route(
 pub async fn get_event_route(
     body: Ruma<get_event::v1::Request>,
 ) -> Result<get_event::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let sender_servername = body
         .sender_servername
         .as_ref()

@@ -992,7 +988,7 @@ pub async fn get_event_route(
 
     if !services().rooms.state_accessor.server_can_see_event(
         sender_servername,
-        &room_id,
+        room_id,
         &body.event_id,
     )? {
         return Err(Error::BadRequest(

@@ -1015,10 +1011,6 @@ pub async fn get_event_route(
 pub async fn get_backfill_route(
     body: Ruma<get_backfill::v1::Request>,
 ) -> Result<get_backfill::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let sender_servername = body
         .sender_servername
         .as_ref()
@@ -1058,7 +1050,7 @@ pub async fn get_backfill_route(
     let all_events = services()
         .rooms
         .timeline
-        .pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
+        .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
         .take(limit.try_into().unwrap());
 
     let events = all_events

@@ -1075,7 +1067,7 @@ pub async fn get_backfill_route(
         })
         .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id))
         .filter_map(|r| r.ok().flatten())
-        .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu))
+        .map(PduEvent::convert_to_outgoing_federation_event)
        .collect();
 
     Ok(get_backfill::v1::Response {

@@ -1091,10 +1083,6 @@ pub async fn get_backfill_route(
 pub async fn get_missing_events_route(
     body: Ruma<get_missing_events::v1::Request>,
 ) -> Result<get_missing_events::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let sender_servername = body
         .sender_servername
         .as_ref()

@@ -1180,10 +1168,6 @@ pub async fn get_missing_events_route(
 pub async fn get_event_authorization_route(
     body: Ruma<get_event_authorization::v1::Request>,
 ) -> Result<get_event_authorization::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let sender_servername = body
         .sender_servername
         .as_ref()

@@ -1242,10 +1226,6 @@ pub async fn get_event_authorization_route(
 pub async fn get_room_state_route(
     body: Ruma<get_room_state::v1::Request>,
 ) -> Result<get_room_state::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let sender_servername = body
         .sender_servername
         .as_ref()

@@ -1322,10 +1302,6 @@ pub async fn get_room_state_route(
 pub async fn get_room_state_ids_route(
     body: Ruma<get_room_state_ids::v1::Request>,
 ) -> Result<get_room_state_ids::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let sender_servername = body
         .sender_servername
         .as_ref()

@@ -1383,10 +1359,6 @@ pub async fn get_room_state_ids_route(
 pub async fn create_join_event_template_route(
     body: Ruma<prepare_join_event::v1::Request>,
 ) -> Result<prepare_join_event::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     if !services().rooms.metadata.exists(&body.room_id)? {
         return Err(Error::BadRequest(
             ErrorKind::NotFound,
@@ -1409,7 +1381,7 @@ pub async fn create_join_event_template_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(body.room_id.to_owned())
             .or_default(),
     );

@@ -1494,10 +1466,6 @@ async fn create_join_event(
     room_id: &RoomId,
     pdu: &RawJsonValue,
 ) -> Result<create_join_event::v1::RoomState> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     if !services().rooms.metadata.exists(room_id)? {
         return Err(Error::BadRequest(
             ErrorKind::NotFound,

@@ -1579,7 +1547,7 @@ async fn create_join_event(
             .globals
             .roomid_mutex_federation
             .write()
-            .unwrap()
+            .await
             .entry(room_id.to_owned())
             .or_default(),
     );

@@ -1678,10 +1646,6 @@ pub async fn create_join_event_v2_route(
 pub async fn create_invite_route(
     body: Ruma<create_invite::v2::Request>,
 ) -> Result<create_invite::v2::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let sender_servername = body
         .sender_servername
         .as_ref()

@@ -1795,8 +1759,11 @@ pub async fn create_invite_route(
 pub async fn get_devices_route(
     body: Ruma<get_devices::v1::Request>,
 ) -> Result<get_devices::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
+    if body.user_id.server_name() != services().globals.server_name() {
+        return Err(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Tried to access user from other server.",
+        ));
     }
 
     let sender_servername = body

@@ -1844,10 +1811,6 @@ pub async fn get_devices_route(
 pub async fn get_room_information_route(
     body: Ruma<get_room_information::v1::Request>,
 ) -> Result<get_room_information::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
-    }
-
     let room_id = services()
         .rooms
         .alias

@@ -1869,8 +1832,11 @@ pub async fn get_room_information_route(
 pub async fn get_profile_information_route(
     body: Ruma<get_profile_information::v1::Request>,
 ) -> Result<get_profile_information::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
+    if body.user_id.server_name() != services().globals.server_name() {
+        return Err(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Tried to access user from other server.",
+        ));
     }
 
     let mut displayname = None;

@@ -1905,8 +1871,15 @@ pub async fn get_profile_information_route(
 ///
 /// Gets devices and identity keys for the given users.
 pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_keys::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
+    if body
+        .device_keys
+        .iter()
+        .any(|(u, _)| u.server_name() != services().globals.server_name())
+    {
+        return Err(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Tried to access user from other server.",
+        ));
     }
 
     let result = get_keys_helper(None, &body.device_keys, |u| {

@@ -1927,8 +1900,15 @@ pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_keys::v1::Response> {
 pub async fn claim_keys_route(
     body: Ruma<claim_keys::v1::Request>,
 ) -> Result<claim_keys::v1::Response> {
-    if !services().globals.allow_federation() {
-        return Err(Error::bad_config("Federation is disabled."));
+    if body
+        .one_time_keys
+        .iter()
+        .any(|(u, _)| u.server_name() != services().globals.server_name())
+    {
+        return Err(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Tried to access user from other server.",
+        ));
     }
 
     let result = claim_keys_helper(&body.one_time_keys).await?;
27
src/clap.rs
Normal file
27
src/clap.rs
Normal file
|
@@ -0,0 +1,27 @@
+//! Integration with `clap`
+
+use clap::Parser;
+
+/// Returns the current version of the crate with extra info if supplied
+///
+/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to
+/// include it in parentheses after the SemVer version. A common value is the git
+/// commit hash.
+fn version() -> String {
+    let cargo_pkg_version = env!("CARGO_PKG_VERSION");
+
+    match option_env!("CONDUIT_VERSION_EXTRA") {
+        Some(x) => format!("{} ({})", cargo_pkg_version, x),
+        None => cargo_pkg_version.to_owned(),
+    }
+}
+
+/// Command line arguments
+#[derive(Parser)]
+#[clap(about, version = version())]
+pub struct Args {}
+
+/// Parse command line arguments into structured data
+pub fn parse() -> Args {
+    Args::parse()
+}
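Because `version()` reads `CONDUIT_VERSION_EXTRA` through `option_env!`, the extra label is baked in at compile time, not read at runtime. A small test of the same formatting logic, with the env lookup factored out so it can run anywhere — this test is a sketch, not part of the commit:

```rust
/// Mirrors the match in version() with the compile-time lookup replaced
/// by an ordinary parameter, so the formatting can be unit-tested.
fn format_version(pkg: &str, extra: Option<&str>) -> String {
    match extra {
        Some(x) => format!("{} ({})", pkg, x),
        None => pkg.to_owned(),
    }
}

#[test]
fn version_includes_extra_when_set() {
    assert_eq!(format_version("0.7.0", Some("deadbeef")), "0.7.0 (deadbeef)");
    assert_eq!(format_version("0.7.0", None), "0.7.0");
}
```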
@@ -264,7 +264,7 @@ fn default_trusted_servers() -> Vec<OwnedServerName> {
 }

 fn default_log() -> String {
-    "warn,state_res=warn,_=off,sled=off".to_owned()
+    "warn,state_res=warn,_=off".to_owned()
 }

 fn default_turn_ttl() -> u64 {
@@ -29,7 +29,9 @@ use crate::Result;
 /// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
 #[derive(Clone, Debug, Deserialize)]
 #[serde(rename_all = "snake_case")]
+#[derive(Default)]
 pub enum ProxyConfig {
+    #[default]
     None,
     Global {
         #[serde(deserialize_with = "crate::utils::deserialize_from_str")]

@@ -48,11 +50,6 @@ impl ProxyConfig {
         })
     }
 }
-impl Default for ProxyConfig {
-    fn default() -> Self {
-        ProxyConfig::None
-    }
-}

 #[derive(Clone, Debug, Deserialize)]
 pub struct PartialProxyConfig {
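The deleted `impl Default` is replaced by the derive above: since Rust 1.62, `#[derive(Default)]` works on enums when exactly one variant carries `#[default]`. A self-contained illustration:

```rust
#[derive(Debug, PartialEq, Default)]
enum Proxy {
    /// The #[default] attribute picks which variant Default::default() yields.
    #[default]
    None,
    Global,
}

fn main() {
    // Equivalent to the five-line manual `impl Default` that the hunk deletes.
    assert_eq!(Proxy::default(), Proxy::None);
    println!("{:?}", Proxy::default());
}
```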
@@ -116,7 +116,7 @@ impl KvTree for PersyTree {
         match iter {
             Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
                 v.into_iter()
-                    .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
+                    .map(|val| ((*k).to_owned(), (*val).to_owned()))
                     .next()
             })),
             Err(e) => {

@@ -142,7 +142,7 @@ impl KvTree for PersyTree {
             Ok(iter) => {
                 let map = iter.filter_map(|(k, v)| {
                     v.into_iter()
-                        .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
+                        .map(|val| ((*k).to_owned(), (*val).to_owned()))
                         .next()
                 });
                 if backwards {

@@ -179,7 +179,7 @@ impl KvTree for PersyTree {
                 iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
                     .filter_map(|(k, v)| {
                         v.into_iter()
-                            .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
+                            .map(|val| ((*k).to_owned(), (*val).to_owned()))
                            .next()
                     }),
             )
@@ -23,32 +23,29 @@ pub struct RocksDbEngineTree<'a> {
 fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options {
     let mut block_based_options = rocksdb::BlockBasedOptions::default();
     block_based_options.set_block_cache(rocksdb_cache);

     // "Difference of spinning disk"
     // https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html
     block_based_options.set_bloom_filter(10.0, false);
     block_based_options.set_block_size(4 * 1024);
     block_based_options.set_cache_index_and_filter_blocks(true);
     block_based_options.set_pin_l0_filter_and_index_blocks_in_cache(true);
     block_based_options.set_optimize_filters_for_memory(true);

     let mut db_opts = rocksdb::Options::default();
     db_opts.set_block_based_table_factory(&block_based_options);
     db_opts.set_optimize_filters_for_hits(true);
     db_opts.set_skip_stats_update_on_db_open(true);
     db_opts.set_level_compaction_dynamic_level_bytes(true);
     db_opts.set_target_file_size_base(256 * 1024 * 1024);
     //db_opts.set_compaction_readahead_size(2 * 1024 * 1024);
     //db_opts.set_use_direct_reads(true);
     //db_opts.set_use_direct_io_for_flush_and_compaction(true);
     db_opts.create_if_missing(true);
     db_opts.increase_parallelism(num_cpus::get() as i32);
     db_opts.set_max_open_files(max_open_files);
-    db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd);
+    db_opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
+    db_opts.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd);
     db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
     db_opts.optimize_level_style_compaction(10 * 1024 * 1024);

     // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
     db_opts.set_level_compaction_dynamic_level_bytes(true);
     db_opts.set_max_background_jobs(6);
     db_opts.set_bytes_per_sync(1048576);

     // https://github.com/facebook/rocksdb/issues/849
     db_opts.set_keep_log_file_num(100);

     // https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords
     //
     // Unclean shutdowns of a Matrix homeserver are likely to be fine when

@@ -56,9 +53,6 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
     // restored via federation.
     db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords);

-    let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
-    db_opts.set_prefix_extractor(prefix_extractor);
-
     db_opts
 }
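The compression change is a common RocksDB tiering setup: cheap Lz4 on the upper, frequently rewritten levels, and denser Zstd only on the bottommost level where most data settles. A minimal sketch of just that configuration with the `rocksdb` crate — the database path is a placeholder:

```rust
use rocksdb::{DBCompressionType, Options, DB};

fn main() -> Result<(), rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    // Cheap compression for hot upper levels...
    opts.set_compression_type(DBCompressionType::Lz4);
    // ...and stronger compression where the bulk of the data ends up.
    opts.set_bottommost_compression_type(DBCompressionType::Zstd);

    let db = DB::open(&opts, "/tmp/example-db")?;
    db.put(b"key", b"value")?;
    assert_eq!(db.get(b"key")?.as_deref(), Some(&b"value"[..]));
    Ok(())
}
```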
@@ -144,12 +138,17 @@ impl RocksDbEngineTree<'_> {

 impl KvTree for RocksDbEngineTree<'_> {
     fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
-        Ok(self.db.rocks.get_cf(&self.cf(), key)?)
+        let readoptions = rocksdb::ReadOptions::default();
+
+        Ok(self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?)
     }

     fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
+        let writeoptions = rocksdb::WriteOptions::default();
         let lock = self.write_lock.read().unwrap();
-        self.db.rocks.put_cf(&self.cf(), key, value)?;
+        self.db
+            .rocks
+            .put_cf_opt(&self.cf(), key, value, &writeoptions)?;
         drop(lock);

         self.watchers.wake(key);

@@ -158,22 +157,31 @@ impl KvTree for RocksDbEngineTree<'_> {
     }

     fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
+        let writeoptions = rocksdb::WriteOptions::default();
         for (key, value) in iter {
-            self.db.rocks.put_cf(&self.cf(), key, value)?;
+            self.db
+                .rocks
+                .put_cf_opt(&self.cf(), key, value, &writeoptions)?;
         }

         Ok(())
     }

     fn remove(&self, key: &[u8]) -> Result<()> {
-        Ok(self.db.rocks.delete_cf(&self.cf(), key)?)
+        let writeoptions = rocksdb::WriteOptions::default();
+        Ok(self
+            .db
+            .rocks
+            .delete_cf_opt(&self.cf(), key, &writeoptions)?)
     }

     fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
+        let readoptions = rocksdb::ReadOptions::default();
+
         Box::new(
             self.db
                 .rocks
-                .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start)
+                .iterator_cf_opt(&self.cf(), readoptions, rocksdb::IteratorMode::Start)
                 .map(|r| r.unwrap())
                 .map(|(k, v)| (Vec::from(k), Vec::from(v))),
         )

@@ -184,11 +192,14 @@ impl KvTree for RocksDbEngineTree<'_> {
         from: &[u8],
         backwards: bool,
     ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
+        let readoptions = rocksdb::ReadOptions::default();
+
         Box::new(
             self.db
                 .rocks
-                .iterator_cf(
+                .iterator_cf_opt(
                     &self.cf(),
+                    readoptions,
                     rocksdb::IteratorMode::From(
                         from,
                         if backwards {

@@ -204,23 +215,33 @@ impl KvTree for RocksDbEngineTree<'_> {
     }

     fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
+        let readoptions = rocksdb::ReadOptions::default();
+        let writeoptions = rocksdb::WriteOptions::default();
+
         let lock = self.write_lock.write().unwrap();

-        let old = self.db.rocks.get_cf(&self.cf(), key)?;
+        let old = self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?;
         let new = utils::increment(old.as_deref()).unwrap();
-        self.db.rocks.put_cf(&self.cf(), key, &new)?;
+        self.db
+            .rocks
+            .put_cf_opt(&self.cf(), key, &new, &writeoptions)?;

         drop(lock);
         Ok(new)
     }

     fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
+        let readoptions = rocksdb::ReadOptions::default();
+        let writeoptions = rocksdb::WriteOptions::default();
+
         let lock = self.write_lock.write().unwrap();

         for key in iter {
-            let old = self.db.rocks.get_cf(&self.cf(), &key)?;
+            let old = self.db.rocks.get_cf_opt(&self.cf(), &key, &readoptions)?;
             let new = utils::increment(old.as_deref()).unwrap();
-            self.db.rocks.put_cf(&self.cf(), key, new)?;
+            self.db
+                .rocks
+                .put_cf_opt(&self.cf(), key, new, &writeoptions)?;
         }

         drop(lock);

@@ -232,11 +253,14 @@ impl KvTree for RocksDbEngineTree<'_> {
         &'a self,
         prefix: Vec<u8>,
     ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
+        let readoptions = rocksdb::ReadOptions::default();
+
         Box::new(
             self.db
                 .rocks
-                .iterator_cf(
+                .iterator_cf_opt(
                     &self.cf(),
+                    readoptions,
                     rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
                 )
                 .map(|r| r.unwrap())
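Every accessor now goes through the `*_opt` call variants with an explicit `ReadOptions`/`WriteOptions`. The defaults behave exactly like the plain `get`/`put` calls, but there is now one obvious place to tune reads and writes later. The same pattern against a plain `DB`, as a sketch:

```rust
use rocksdb::{Options, ReadOptions, WriteOptions, DB};

fn main() -> Result<(), rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    let db = DB::open(&opts, "/tmp/opt-demo")?;

    // Explicit options objects: defaults for now, knobs for later.
    let mut writeopts = WriteOptions::default();
    writeopts.set_sync(false); // the default, spelled out
    db.put_opt(b"counter", b"1", &writeopts)?;

    let mut readopts = ReadOptions::default();
    readopts.fill_cache(true); // keep read blocks in the block cache
    assert_eq!(db.get_opt(b"counter", &readopts)?.as_deref(), Some(&b"1"[..]));
    Ok(())
}
```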
@@ -33,7 +33,7 @@ impl Iterator for PreparedStatementIterator<'_> {
 struct NonAliasingBox<T>(*mut T);
 impl<T> Drop for NonAliasingBox<T> {
     fn drop(&mut self) {
-        unsafe { Box::from_raw(self.0) };
+        drop(unsafe { Box::from_raw(self.0) });
     }
 }
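The new `drop(...)` wrapper does not change behavior; the rebuilt `Box` was already destroyed at the end of the statement. It only states the intent explicitly and satisfies lints that flag a `#[must_use]` value being created and immediately discarded. The same idiom in miniature — a standalone sketch, not Conduit's code:

```rust
fn main() {
    // Leak a boxed value to get a raw pointer (stand-in for NonAliasingBox,
    // which owns its payload through a raw pointer).
    let raw: *mut String = Box::into_raw(Box::new(String::from("conn")));

    // Reconstructing the Box and letting it fall out of scope frees the value.
    // The explicit drop(...) spells out that freeing is the whole point,
    // instead of leaving a bare `unsafe { ... };` expression statement behind.
    drop(unsafe { Box::from_raw(raw) });
}
```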
@@ -8,6 +8,7 @@ use tokio::sync::watch;
 #[derive(Default)]
 pub(super) struct Watchers {
+    #[allow(clippy::type_complexity)]
     watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
 }
@@ -123,13 +123,12 @@ impl service::account_data::Data for KeyValueDatabase {
             .take_while(move |(k, _)| k.starts_with(&prefix))
             .map(|(k, v)| {
                 Ok::<_, Error>((
-                    RoomAccountDataEventType::try_from(
+                    RoomAccountDataEventType::from(
                         utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
                             || Error::bad_database("RoomUserData ID in db is invalid."),
                         )?)
                         .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
-                    )
-                    .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
+                    ),
                     serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
                         Error::bad_database("Database contains invalid account data.")
                     })?,
@@ -1,18 +1,15 @@
+use ruma::api::appservice::Registration;
+
 use crate::{database::KeyValueDatabase, service, utils, Error, Result};

 impl service::appservice::Data for KeyValueDatabase {
     /// Registers an appservice and returns the ID to the caller
-    fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
-        // TODO: Rumaify
-        let id = yaml.get("id").unwrap().as_str().unwrap();
+    fn register_appservice(&self, yaml: Registration) -> Result<String> {
+        let id = yaml.id.as_str();
         self.id_appserviceregistrations.insert(
             id.as_bytes(),
             serde_yaml::to_string(&yaml).unwrap().as_bytes(),
         )?;
-        self.cached_registrations
-            .write()
-            .unwrap()
-            .insert(id.to_owned(), yaml.to_owned());

         Ok(id.to_owned())
     }

@@ -25,33 +22,18 @@ impl service::appservice::Data for KeyValueDatabase {
     fn unregister_appservice(&self, service_name: &str) -> Result<()> {
         self.id_appserviceregistrations
             .remove(service_name.as_bytes())?;
-        self.cached_registrations
-            .write()
-            .unwrap()
-            .remove(service_name);
         Ok(())
     }

-    fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
-        self.cached_registrations
-            .read()
-            .unwrap()
-            .get(id)
-            .map_or_else(
-                || {
+    fn get_registration(&self, id: &str) -> Result<Option<Registration>> {
         self.id_appserviceregistrations
             .get(id.as_bytes())?
             .map(|bytes| {
                 serde_yaml::from_slice(&bytes).map_err(|_| {
-                    Error::bad_database(
-                        "Invalid registration bytes in id_appserviceregistrations.",
-                    )
+                    Error::bad_database("Invalid registration bytes in id_appserviceregistrations.")
                 })
             })
             .transpose()
-                },
-                |r| Ok(Some(r.clone())),
-            )
     }

     fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>> {

@@ -64,7 +46,7 @@ impl service::appservice::Data for KeyValueDatabase {
         )))
     }

-    fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
+    fn all(&self) -> Result<Vec<(String, Registration)>> {
         self.iter_ids()?
             .filter_map(|id| id.ok())
             .map(move |id| {
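The storage format stays serialized YAML; only the boundary type tightens from an untyped `serde_yaml::Value` to ruma's `Registration`, so a malformed registration now fails at parse time rather than at first field access. A hedged sketch of the same round-trip with a stand-in struct — ruma's real `Registration` has more fields:

```rust
use serde::{Deserialize, Serialize};

// Stand-in for ruma::api::appservice::Registration; only the fields
// touched by this hunk are modeled.
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct Registration {
    id: String,
    sender_localpart: String,
}

fn main() -> Result<(), serde_yaml::Error> {
    let reg = Registration {
        id: "bridge1".to_owned(),
        sender_localpart: "bridgebot".to_owned(),
    };

    // What register_appservice persists...
    let bytes = serde_yaml::to_string(&reg)?;
    // ...and what get_registration parses back, now strongly typed.
    let parsed: Registration = serde_yaml::from_str(&bytes)?;
    assert_eq!(parsed, reg);
    Ok(())
}
```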
@@ -94,7 +94,9 @@ impl service::globals::Data for KeyValueDatabase {
         futures.push(self.pduid_pdu.watch_prefix(&short_roomid));

         // EDUs
-        futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes));
+        futures.push(Box::into_pin(Box::new(async move {
+            let _result = services().rooms.edus.typing.wait_for_update(&room_id).await;
+        })));

         futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));

@@ -256,8 +258,8 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
             ..
         } = new_keys;

-        keys.verify_keys.extend(verify_keys.into_iter());
-        keys.old_verify_keys.extend(old_verify_keys.into_iter());
+        keys.verify_keys.extend(verify_keys);
+        keys.old_verify_keys.extend(old_verify_keys);

         self.server_signingkeys.insert(
             origin.as_bytes(),
@@ -1,6 +1,5 @@
 mod presence;
 mod read_receipt;
-mod typing;

 use crate::{database::KeyValueDatabase, service};
@@ -1,127 +0,0 @@
-use std::{collections::HashSet, mem};
-
-use ruma::{OwnedUserId, RoomId, UserId};
-
-use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
-
-impl service::rooms::edus::typing::Data for KeyValueDatabase {
-    fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
-        let mut prefix = room_id.as_bytes().to_vec();
-        prefix.push(0xff);
-
-        let count = services().globals.next_count()?.to_be_bytes();
-
-        let mut room_typing_id = prefix;
-        room_typing_id.extend_from_slice(&timeout.to_be_bytes());
-        room_typing_id.push(0xff);
-        room_typing_id.extend_from_slice(&count);
-
-        self.typingid_userid
-            .insert(&room_typing_id, user_id.as_bytes())?;
-
-        self.roomid_lasttypingupdate
-            .insert(room_id.as_bytes(), &count)?;
-
-        Ok(())
-    }
-
-    fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
-        let mut prefix = room_id.as_bytes().to_vec();
-        prefix.push(0xff);
-
-        let user_id = user_id.to_string();
-
-        let mut found_outdated = false;
-
-        // Maybe there are multiple ones from calling roomtyping_add multiple times
-        for outdated_edu in self
-            .typingid_userid
-            .scan_prefix(prefix)
-            .filter(|(_, v)| &**v == user_id.as_bytes())
-        {
-            self.typingid_userid.remove(&outdated_edu.0)?;
-            found_outdated = true;
-        }
-
-        if found_outdated {
-            self.roomid_lasttypingupdate.insert(
-                room_id.as_bytes(),
-                &services().globals.next_count()?.to_be_bytes(),
-            )?;
-        }
-
-        Ok(())
-    }
-
-    fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
-        let mut prefix = room_id.as_bytes().to_vec();
-        prefix.push(0xff);
-
-        let current_timestamp = utils::millis_since_unix_epoch();
-
-        let mut found_outdated = false;
-
-        // Find all outdated edus before inserting a new one
-        for outdated_edu in self
-            .typingid_userid
-            .scan_prefix(prefix)
-            .map(|(key, _)| {
-                Ok::<_, Error>((
-                    key.clone(),
-                    utils::u64_from_bytes(
-                        &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| {
-                            Error::bad_database("RoomTyping has invalid timestamp or delimiters.")
-                        })?[0..mem::size_of::<u64>()],
-                    )
-                    .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?,
-                ))
-            })
-            .filter_map(|r| r.ok())
-            .take_while(|&(_, timestamp)| timestamp < current_timestamp)
-        {
-            // This is an outdated edu (time > timestamp)
-            self.typingid_userid.remove(&outdated_edu.0)?;
-            found_outdated = true;
-        }
-
-        if found_outdated {
-            self.roomid_lasttypingupdate.insert(
-                room_id.as_bytes(),
-                &services().globals.next_count()?.to_be_bytes(),
-            )?;
-        }
-
-        Ok(())
-    }
-
-    fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
-        Ok(self
-            .roomid_lasttypingupdate
-            .get(room_id.as_bytes())?
-            .map(|bytes| {
-                utils::u64_from_bytes(&bytes).map_err(|_| {
-                    Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
-                })
-            })
-            .transpose()?
-            .unwrap_or(0))
-    }
-
-    fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>> {
-        let mut prefix = room_id.as_bytes().to_vec();
-        prefix.push(0xff);
-
-        let mut user_ids = HashSet::new();
-
-        for (_, user_id) in self.typingid_userid.scan_prefix(prefix) {
-            let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| {
-                Error::bad_database("User ID in typingid_userid is invalid unicode.")
-            })?)
-            .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?;
-
-            user_ids.insert(user_id);
-        }
-
-        Ok(user_ids)
-    }
-}
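The deleted file persisted typing EDUs in database trees; together with the watcher hunk above, typing state moves into an in-memory service exposing an async `wait_for_update`. A minimal sketch of that idea with tokio's `watch` channel — the `TypingService` here is illustrative, not Conduit's exact service:

```rust
use std::collections::HashSet;
use tokio::sync::{watch, Mutex};

/// Illustrative in-memory typing store; Conduit's real service also
/// tracks per-user timeouts and update counters.
struct TypingService {
    typing: Mutex<HashSet<String>>, // user IDs currently typing
    notify: (watch::Sender<()>, watch::Receiver<()>),
}

impl TypingService {
    fn new() -> Self {
        Self {
            typing: Mutex::new(HashSet::new()),
            notify: watch::channel(()),
        }
    }

    async fn typing_add(&self, user_id: &str) {
        self.typing.lock().await.insert(user_id.to_owned());
        let _ = self.notify.0.send(()); // wake every waiter
    }

    /// Resolves the next time the typing set changes.
    async fn wait_for_update(&self) {
        let mut rx = self.notify.1.clone();
        let _ = rx.changed().await;
    }
}

#[tokio::main]
async fn main() {
    let svc = TypingService::new();
    // join! polls wait_for_update first, so it is already subscribed
    // when typing_add fires the notification.
    tokio::join!(svc.wait_for_update(), svc.typing_add("@alice:example.org"));
    assert!(svc.typing.lock().await.contains("@alice:example.org"));
}
```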
@@ -157,10 +157,9 @@ impl service::rooms::short::Data for KeyValueDatabase {
             .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;

         let event_type =
-            StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
+            StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
                 Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
-            })?)
-            .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;
+            })?);

         let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| {
             Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
@@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         let parsed = services()
             .rooms
             .state_compressor
-            .parse_compressed_state_event(&compressed)?;
+            .parse_compressed_state_event(compressed)?;
         result.insert(parsed.0, parsed.1);

         i += 1;

@@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         let (_, eventid) = services()
             .rooms
             .state_compressor
-            .parse_compressed_state_event(&compressed)?;
+            .parse_compressed_state_event(compressed)?;
         if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
             result.insert(
                 (

@@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
                 services()
                     .rooms
                     .state_compressor
-                    .parse_compressed_state_event(&compressed)
+                    .parse_compressed_state_event(compressed)
                     .ok()
                     .map(|(_, id)| id)
             }))
@@ -1,13 +1,16 @@
 use std::{collections::HashSet, sync::Arc};

-use regex::Regex;
 use ruma::{
     events::{AnyStrippedStateEvent, AnySyncStateEvent},
     serde::Raw,
     OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
 };

-use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
+use crate::{
+    database::KeyValueDatabase,
+    service::{self, appservice::RegistrationInfo},
+    services, utils, Error, Result,
+};

 impl service::rooms::state_cache::Data for KeyValueDatabase {
     fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
@@ -184,46 +187,28 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
     }

     #[tracing::instrument(skip(self, room_id, appservice))]
-    fn appservice_in_room(
-        &self,
-        room_id: &RoomId,
-        appservice: &(String, serde_yaml::Value),
-    ) -> Result<bool> {
+    fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result<bool> {
         let maybe = self
             .appservice_in_room_cache
             .read()
             .unwrap()
             .get(room_id)
-            .and_then(|map| map.get(&appservice.0))
+            .and_then(|map| map.get(&appservice.registration.id))
             .copied();

         if let Some(b) = maybe {
             Ok(b)
-        } else if let Some(namespaces) = appservice.1.get("namespaces") {
-            let users = namespaces
-                .get("users")
-                .and_then(|users| users.as_sequence())
-                .map_or_else(Vec::new, |users| {
-                    users
-                        .iter()
-                        .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok())
-                        .collect::<Vec<_>>()
-                });
-
-            let bridge_user_id = appservice
-                .1
-                .get("sender_localpart")
-                .and_then(|string| string.as_str())
-                .and_then(|string| {
-                    UserId::parse_with_server_name(string, services().globals.server_name()).ok()
-                });
+        } else {
+            let bridge_user_id = UserId::parse_with_server_name(
+                appservice.registration.sender_localpart.as_str(),
+                services().globals.server_name(),
+            )
+            .ok();

             let in_room = bridge_user_id
                 .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false))
                 || self.room_members(room_id).any(|userid| {
-                    userid.map_or(false, |userid| {
-                        users.iter().any(|r| r.is_match(userid.as_str()))
-                    })
+                    userid.map_or(false, |userid| appservice.users.is_match(userid.as_str()))
                 });

             self.appservice_in_room_cache

@@ -231,11 +216,9 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
                 .unwrap()
                 .entry(room_id.to_owned())
                 .or_default()
-                .insert(appservice.0.clone(), in_room);
+                .insert(appservice.registration.id.clone(), in_room);

             Ok(in_room)
-        } else {
-            Ok(false)
         }
     }

@@ -471,6 +454,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
     }

     /// Returns an iterator over all rooms a user was invited to.
+    #[allow(clippy::type_complexity)]
     #[tracing::instrument(skip(self))]
     fn rooms_invited<'a>(
         &'a self,

@@ -549,6 +533,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
     }

     /// Returns an iterator over all rooms a user left.
+    #[allow(clippy::type_complexity)]
    #[tracing::instrument(skip(self))]
     fn rooms_left<'a>(
         &'a self,
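`appservice.users` above is a precompiled `NamespaceRegex` (defined later in this diff), so the membership test no longer rebuilds a `Regex` per namespace entry on every call. The underlying `regex::RegexSet` behavior, in miniature — the patterns are made up:

```rust
use regex::RegexSet;

fn main() {
    // One compiled set standing in for all user namespaces of a bridge.
    let users = RegexSet::new([
        r"@_irc_.*:example\.org",
        r"@_discord_.*:example\.org",
    ])
    .expect("static patterns compile");

    // A single scan answers "does any namespace claim this user?".
    assert!(users.is_match("@_irc_alice:example.org"));
    assert!(!users.is_match("@bob:example.org"));
}
```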
@@ -10,7 +10,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
         user_id: &'a UserId,
         room_id: &'a RoomId,
         until: u64,
-        include: &'a IncludeThreads,
+        _include: &'a IncludeThreads,
     ) -> Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>> {
         let prefix = services()
             .rooms

@@ -27,7 +27,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
         self.threadid_userids
             .iter_from(&current, true)
             .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(move |(pduid, users)| {
+            .map(move |(pduid, _users)| {
                 let count = utils::u64_from_bytes(&pduid[(mem::size_of::<u64>())..])
                     .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?;
                 let mut pdu = services()

@@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase {
         .collect::<Vec<_>>()
         .join(&[0xff][..]);

-        self.threadid_userids.insert(&root_id, &users)?;
+        self.threadid_userids.insert(root_id, &users)?;

         Ok(())
     }

     fn get_participants(&self, root_id: &[u8]) -> Result<Option<Vec<OwnedUserId>>> {
-        if let Some(users) = self.threadid_userids.get(&root_id)? {
+        if let Some(users) = self.threadid_userids.get(root_id)? {
             Ok(Some(
                 users
                     .split(|b| *b == 0xff)
@@ -39,11 +39,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase {

     /// Returns the `count` of this pdu's id.
     fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
-        Ok(self
-            .eventid_pduid
+        self.eventid_pduid
             .get(event_id.as_bytes())?
             .map(|pdu_id| pdu_count(&pdu_id))
-            .transpose()?)
+            .transpose()
     }

     /// Returns the json of a pdu.

@@ -80,7 +79,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {

     /// Returns the pdu's id.
     fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
-        Ok(self.eventid_pduid.get(event_id.as_bytes())?)
+        self.eventid_pduid.get(event_id.as_bytes())
     }

     /// Returns the pdu.

@@ -230,7 +229,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         room_id: &RoomId,
         until: PduCount,
     ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
-        let (prefix, current) = count_to_id(&room_id, until, 1, true)?;
+        let (prefix, current) = count_to_id(room_id, until, 1, true)?;

         let user_id = user_id.to_owned();

@@ -257,7 +256,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         room_id: &RoomId,
         from: PduCount,
     ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
-        let (prefix, current) = count_to_id(&room_id, from, 1, false)?;
+        let (prefix, current) = count_to_id(room_id, from, 1, false)?;

         let user_id = user_id.to_owned();

@@ -332,7 +331,7 @@ fn count_to_id(
         .rooms
         .short
         .get_shortroomid(room_id)?
-        .expect("room exists")
+        .ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))?
         .to_be_bytes()
         .to_vec();
     let mut pdu_id = prefix.clone();
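The last hunk swaps a panic for a recoverable error: `.expect(...)` becomes `.ok_or_else(...)?`. The difference, reduced to a toy function (the types here are stand-ins):

```rust
#[derive(Debug)]
struct BadDatabase(&'static str);

/// Missing data becomes an Err the caller can handle instead of a panic.
fn shortroomid(found: Option<u64>) -> Result<u64, BadDatabase> {
    found.ok_or_else(|| BadDatabase("Looked for bad shortroomid in timeline"))
}

fn main() {
    assert_eq!(shortroomid(Some(7)).unwrap(), 7);
    assert!(shortroomid(None).is_err()); // no panic, just an Err
}
```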
@@ -146,10 +146,9 @@ impl service::users::Data for KeyValueDatabase {
         self.userid_avatarurl
             .get(user_id.as_bytes())?
             .map(|bytes| {
-                let s = utils::string_from_bytes(&bytes)
-                    .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
-                s.try_into()
+                utils::string_from_bytes(&bytes)
+                    .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
+                    .map(Into::into)
             })
             .transpose()
     }
@@ -8,6 +8,7 @@ use crate::{
 use abstraction::{KeyValueDatabaseEngine, KvTree};
 use directories::ProjectDirs;
 use lru_cache::LruCache;
+
 use ruma::{
     events::{
         push_rules::{PushRulesEvent, PushRulesEventContent},

@@ -70,8 +71,6 @@ pub struct KeyValueDatabase {
     pub(super) readreceiptid_readreceipt: Arc<dyn KvTree>, // ReadReceiptId = RoomId + Count + UserId
     pub(super) roomuserid_privateread: Arc<dyn KvTree>, // RoomUserId = Room + User, PrivateRead = Count
     pub(super) roomuserid_lastprivatereadupdate: Arc<dyn KvTree>, // LastPrivateReadUpdate = Count
-    pub(super) typingid_userid: Arc<dyn KvTree>, // TypingId = RoomId + TimeoutTime + Count
-    pub(super) roomid_lasttypingupdate: Arc<dyn KvTree>, // LastRoomTypingUpdate = Count
     pub(super) presenceid_presence: Arc<dyn KvTree>, // PresenceId = RoomId + Count + UserId
     pub(super) userid_lastpresenceupdate: Arc<dyn KvTree>, // LastPresenceUpdate = Count

@@ -162,7 +161,6 @@ pub struct KeyValueDatabase {
     //pub pusher: pusher::PushData,
     pub(super) senderkey_pusher: Arc<dyn KvTree>,

-    pub(super) cached_registrations: Arc<RwLock<HashMap<String, serde_yaml::Value>>>,
     pub(super) pdu_cache: Mutex<LruCache<OwnedEventId, Arc<PduEvent>>>,
     pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
     pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,

@@ -301,8 +299,6 @@ impl KeyValueDatabase {
             roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt
             roomuserid_lastprivatereadupdate: builder
                 .open_tree("roomuserid_lastprivatereadupdate")?,
-            typingid_userid: builder.open_tree("typingid_userid")?,
-            roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?,
             presenceid_presence: builder.open_tree("presenceid_presence")?,
             userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
             pduid_pdu: builder.open_tree("pduid_pdu")?,

@@ -372,7 +368,6 @@ impl KeyValueDatabase {
             global: builder.open_tree("global")?,
             server_signingkeys: builder.open_tree("server_signingkeys")?,

-            cached_registrations: Arc::new(RwLock::new(HashMap::new())),
             pdu_cache: Mutex::new(LruCache::new(
                 config
                     .pdu_cache_capacity
@@ -852,7 +847,9 @@ impl KeyValueDatabase {
                 if rule.is_some() {
                     let mut rule = rule.unwrap().clone();
                     rule.rule_id = content_rule_transformation[1].to_owned();
-                    rules_list.content.remove(content_rule_transformation[0]);
+                    rules_list
+                        .content
+                        .shift_remove(content_rule_transformation[0]);
                     rules_list.content.insert(rule);
                 }
             }

@@ -875,7 +872,7 @@ impl KeyValueDatabase {
                 if let Some(rule) = rule {
                     let mut rule = rule.clone();
                     rule.rule_id = transformation[1].to_owned();
-                    rules_list.underride.remove(transformation[0]);
+                    rules_list.underride.shift_remove(transformation[0]);
                     rules_list.underride.insert(rule);
                 }
             }
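`shift_remove` is the order-preserving removal on `IndexSet`/`IndexMap`: the remaining entries shift down instead of the last entry being swapped into the hole, so push-rule evaluation order survives the edit. A small demonstration with the `indexmap` crate (assumed here; ruma's rule lists are index sets internally):

```rust
use indexmap::IndexSet;

fn main() {
    let mut rules: IndexSet<&str> = ["a", "b", "c", "d"].into_iter().collect();

    // shift_remove keeps relative order: ["a", "c", "d"].
    rules.shift_remove("b");
    assert_eq!(rules.iter().copied().collect::<Vec<_>>(), ["a", "c", "d"]);

    // swap_remove would have moved "d" into "b"'s slot instead,
    // scrambling the evaluation order.
}
```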
13  src/lib.rs

@@ -1,18 +1,13 @@
-#![warn(
-    rust_2018_idioms,
-    unused_qualifications,
-    clippy::cloned_instead_of_copied,
-    clippy::str_to_string
-)]
-#![allow(clippy::suspicious_else_formatting)]
-#![deny(clippy::dbg_macro)]
-
 pub mod api;
+pub mod clap;
 mod config;
 mod database;
 mod service;
 mod utils;

+// Not async due to services() being used in many closures, and async closures are not stable as of writing
+// This is the case for every other occurrence of sync Mutex/RwLock, except for database related ones, where
+// the current maintainer (Timo) has asked to not modify those
 use std::sync::RwLock;

 pub use api::ruma_wrapper::{Ruma, RumaResponse};
20  src/main.rs

@@ -1,13 +1,3 @@
-#![warn(
-    rust_2018_idioms,
-    unused_qualifications,
-    clippy::cloned_instead_of_copied,
-    clippy::str_to_string,
-    clippy::future_not_send
-)]
-#![allow(clippy::suspicious_else_formatting)]
-#![deny(clippy::dbg_macro)]
-
 use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};

 use axum::{

@@ -54,6 +44,8 @@ static GLOBAL: Jemalloc = Jemalloc;

 #[tokio::main]
 async fn main() {
+    clap::parse();
+
     // Initialize config
     let raw_config =
         Figment::new()

@@ -75,8 +67,6 @@ async fn main() {

     config.warn_deprecated();

-    let log = format!("{},ruma_state_res=error,_=off,sled=off", config.log);
-
     if config.allow_jaeger {
         opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
         let tracer = opentelemetry_jaeger::new_agent_pipeline()

@@ -86,7 +76,7 @@ async fn main() {
             .unwrap();
         let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);

-        let filter_layer = match EnvFilter::try_new(&log) {
+        let filter_layer = match EnvFilter::try_new(&config.log) {
             Ok(s) => s,
             Err(e) => {
                 eprintln!(

@@ -113,7 +103,7 @@ async fn main() {
     } else {
         let registry = tracing_subscriber::Registry::default();
         let fmt_layer = tracing_subscriber::fmt::Layer::new();
-        let filter_layer = match EnvFilter::try_new(&log) {
+        let filter_layer = match EnvFilter::try_new(&config.log) {
             Ok(s) => s,
             Err(e) => {
                 eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}");

@@ -238,7 +228,7 @@ async fn spawn_task<B: Send + 'static>(
         .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
 }

-async fn unrecognized_method<B>(
+async fn unrecognized_method<B: Send>(
     req: axum::http::Request<B>,
     next: axum::middleware::Next<B>,
 ) -> std::result::Result<axum::response::Response, StatusCode> {
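With the hard-coded `sled=off` suffix gone, the filter string is `config.log` as-is. For reference, `tracing_subscriber::EnvFilter` parses the same comma-separated `target=level` directives; a small sketch (requires tracing-subscriber's `env-filter` feature):

```rust
use tracing_subscriber::{prelude::*, EnvFilter};

fn main() {
    // Same shape as Conduit's default: a global level plus per-target overrides.
    let filter = EnvFilter::try_new("warn,state_res=warn,_=off")
        .expect("static filter string is valid");

    tracing_subscriber::registry()
        .with(tracing_subscriber::fmt::layer())
        .with(filter)
        .init();

    tracing::warn!("this passes the `warn` default");
    tracing::info!("this is filtered out");
}
```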
@@ -1,13 +1,14 @@
 use std::{
     collections::BTreeMap,
-    convert::{TryFrom, TryInto},
-    sync::{Arc, RwLock},
+    sync::Arc,
     time::Instant,
 };

 use clap::Parser;
 use regex::Regex;
 use ruma::{
+    api::appservice::Registration,
     events::{
         room::{
             canonical_alias::RoomCanonicalAliasEventContent,

@@ -23,10 +24,10 @@ use ruma::{
     },
     TimelineEventType,
 },
-    EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
+    EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
 };
 use serde_json::value::to_raw_value;
-use tokio::sync::{mpsc, Mutex, MutexGuard};
+use tokio::sync::{mpsc, Mutex, RwLock};

 use crate::{
     api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH},

@@ -50,7 +51,7 @@ enum AdminCommand {
     /// Registering a new bridge using the ID of an existing bridge will replace
     /// the old one.
     ///
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # yaml content here
     /// # ```

@@ -96,7 +97,7 @@ enum AdminCommand {
     /// Removing a mass amount of users from a room may cause a significant amount of leave events.
     /// The time to leave rooms may depend significantly on joined rooms and servers.
     ///
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # User list here
     /// # ```

@@ -121,7 +122,7 @@ enum AdminCommand {
     /// The PDU event is only checked for validity and is not added to the
     /// database.
     ///
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # PDU json content here
     /// # ```

@@ -165,14 +166,14 @@ enum AdminCommand {
     EnableRoom { room_id: Box<RoomId> },

     /// Verify json signatures
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # json here
     /// # ```
     SignJson,

     /// Verify json signatures
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # json here
     /// # ```

@@ -214,38 +215,7 @@ impl Service {
         let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
             .expect("@conduit:server_name is valid");

-        let conduit_room = services()
-            .rooms
-            .alias
-            .resolve_local_alias(
-                format!("#admins:{}", services().globals.server_name())
-                    .as_str()
-                    .try_into()
-                    .expect("#admins:server_name is a valid room alias"),
-            )
-            .expect("Database data for admin room alias must be valid")
-            .expect("Admin room must exist");
-
-        let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| {
-            services()
-                .rooms
-                .timeline
-                .build_and_append_pdu(
-                    PduBuilder {
-                        event_type: TimelineEventType::RoomMessage,
-                        content: to_raw_value(&message)
-                            .expect("event is valid, we just created it"),
-                        unsigned: None,
-                        state_key: None,
-                        redacts: None,
-                    },
-                    &conduit_user,
-                    &conduit_room,
-                    mutex_lock,
-                )
-                .unwrap();
-        };
-
+        if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
             loop {
                 tokio::select! {
                     Some(event) = receiver.recv() => {

@@ -258,16 +228,31 @@ impl Service {
                             services().globals
                                 .roomid_mutex_state
                                 .write()
-                                .unwrap()
+                                .await
                                 .entry(conduit_room.to_owned())
                                 .or_default(),
                         );

                         let state_lock = mutex_state.lock().await;

-                        send_message(message_content, &state_lock);
-
-                        drop(state_lock);
+                        services()
+                            .rooms
+                            .timeline
+                            .build_and_append_pdu(
+                                PduBuilder {
+                                    event_type: TimelineEventType::RoomMessage,
+                                    content: to_raw_value(&message_content)
+                                        .expect("event is valid, we just created it"),
+                                    unsigned: None,
+                                    state_key: None,
+                                    redacts: None,
+                                },
+                                &conduit_user,
+                                &conduit_room,
+                                &state_lock,
+                            )
+                            .await.unwrap();
                     }
                 }
             }
         }

@@ -351,10 +336,9 @@ impl Service {
                 if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
                 {
                     let appservice_config = body[1..body.len() - 1].join("\n");
-                    let parsed_config =
-                        serde_yaml::from_str::<serde_yaml::Value>(&appservice_config);
+                    let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config);
                     match parsed_config {
-                        Ok(yaml) => match services().appservice.register_appservice(yaml) {
+                        Ok(yaml) => match services().appservice.register_appservice(yaml).await {
                             Ok(id) => RoomMessageEventContent::text_plain(format!(
                                 "Appservice registered with ID: {id}."
                             )),

@@ -377,6 +361,7 @@ impl Service {
                 } => match services()
                     .appservice
                     .unregister_appservice(&appservice_identifier)
+                    .await
                 {
                     Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."),
                     Err(e) => RoomMessageEventContent::text_plain(format!(

@@ -384,25 +369,13 @@ impl Service {
                 )),
             },
             AdminCommand::ListAppservices => {
-                if let Ok(appservices) = services()
-                    .appservice
-                    .iter_ids()
-                    .map(|ids| ids.collect::<Vec<_>>())
-                {
-                    let count = appservices.len();
+                let appservices = services().appservice.iter_ids().await;
                 let output = format!(
                     "Appservices ({}): {}",
-                    count,
-                    appservices
-                        .into_iter()
-                        .filter_map(|r| r.ok())
-                        .collect::<Vec<_>>()
-                        .join(", ")
+                    appservices.len(),
+                    appservices.join(", ")
                 );
                 RoomMessageEventContent::text_plain(output)
-                } else {
-                    RoomMessageEventContent::text_plain("Failed to get appservices.")
-                }
             }
             AdminCommand::ListRooms => {
                 let room_ids = services().rooms.metadata.iter_ids();

@@ -434,11 +407,7 @@ impl Service {
                 Err(e) => RoomMessageEventContent::text_plain(e.to_string()),
             },
             AdminCommand::IncomingFederation => {
-                let map = services()
-                    .globals
-                    .roomid_federationhandletime
-                    .read()
-                    .unwrap();
+                let map = services().globals.roomid_federationhandletime.read().await;
                 let mut msg: String = format!("Handling {} incoming pdus:\n", map.len());

                 for (r, (e, i)) in map.iter() {

@@ -552,7 +521,7 @@ impl Service {
                 }
             }
             AdminCommand::MemoryUsage => {
-                let response1 = services().memory_usage();
+                let response1 = services().memory_usage().await;
                 let response2 = services().globals.db.memory_usage();

                 RoomMessageEventContent::text_plain(format!(

@@ -565,7 +534,7 @@ impl Service {
                 RoomMessageEventContent::text_plain("Done.")
             }
             AdminCommand::ClearServiceCaches { amount } => {
-                services().clear_caches(amount);
+                services().clear_caches(amount).await;

                 RoomMessageEventContent::text_plain("Done.")
             }

@@ -586,6 +555,13 @@ impl Service {
                     }
                 };

+                // Checks if user is local
+                if user_id.server_name() != services().globals.server_name() {
+                    return Ok(RoomMessageEventContent::text_plain(
+                        "The specified user is not from this server!",
+                    ));
+                };
+
                 // Check if the specified user is valid
                 if !services().users.exists(&user_id)?
                     || user_id

@@ -689,7 +665,15 @@ impl Service {
                 user_id,
             } => {
                 let user_id = Arc::<UserId>::from(user_id);
-                if services().users.exists(&user_id)? {
+                if !services().users.exists(&user_id)? {
+                    RoomMessageEventContent::text_plain(format!(
+                        "User {user_id} doesn't exist on this server"
+                    ))
+                } else if user_id.server_name() != services().globals.server_name() {
+                    RoomMessageEventContent::text_plain(format!(
+                        "User {user_id} is not from this server"
+                    ))
+                } else {
                     RoomMessageEventContent::text_plain(format!(
                         "Making {user_id} leave all rooms before deactivation..."
                     ));

@@ -703,30 +687,76 @@ impl Service {
                     RoomMessageEventContent::text_plain(format!(
                         "User {user_id} has been deactivated"
                     ))
-                } else {
-                    RoomMessageEventContent::text_plain(format!(
-                        "User {user_id} doesn't exist on this server"
-                    ))
                 }
             }
             AdminCommand::DeactivateAll { leave_rooms, force } => {
                 if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
                 {
-                    let usernames = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
+                    let users = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();

-                    let mut user_ids: Vec<&UserId> = Vec::new();
+                    let mut user_ids = Vec::new();
+                    let mut remote_ids = Vec::new();
+                    let mut non_existant_ids = Vec::new();
+                    let mut invalid_users = Vec::new();

-                    for &username in &usernames {
-                        match <&UserId>::try_from(username) {
-                            Ok(user_id) => user_ids.push(user_id),
+                    for &user in &users {
+                        match <&UserId>::try_from(user) {
+                            Ok(user_id) => {
+                                if user_id.server_name() != services().globals.server_name() {
+                                    remote_ids.push(user_id)
+                                } else if !services().users.exists(user_id)? {
+                                    non_existant_ids.push(user_id)
+                                } else {
+                                    user_ids.push(user_id)
+                                }
+                            }
                             Err(_) => {
-                                return Ok(RoomMessageEventContent::text_plain(format!(
-                                    "{username} is not a valid username"
-                                )))
+                                invalid_users.push(user);
                             }
                         }
                     }

+                    let mut markdown_message = String::new();
+                    let mut html_message = String::new();
+                    if !invalid_users.is_empty() {
+                        markdown_message.push_str("The following user ids are not valid:\n```\n");
+                        html_message.push_str("The following user ids are not valid:\n<pre>\n");
+                        for invalid_user in invalid_users {
+                            markdown_message.push_str(&format!("{invalid_user}\n"));
+                            html_message.push_str(&format!("{invalid_user}\n"));
+                        }
+                        markdown_message.push_str("```\n\n");
+                        html_message.push_str("</pre>\n\n");
+                    }
+                    if !remote_ids.is_empty() {
+                        markdown_message
+                            .push_str("The following users are not from this server:\n```\n");
+                        html_message
+                            .push_str("The following users are not from this server:\n<pre>\n");
+                        for remote_id in remote_ids {
+                            markdown_message.push_str(&format!("{remote_id}\n"));
+                            html_message.push_str(&format!("{remote_id}\n"));
+                        }
+                        markdown_message.push_str("```\n\n");
+                        html_message.push_str("</pre>\n\n");
+                    }
+                    if !non_existant_ids.is_empty() {
+                        markdown_message.push_str("The following users do not exist:\n```\n");
+                        html_message.push_str("The following users do not exist:\n<pre>\n");
+                        for non_existant_id in non_existant_ids {
+                            markdown_message.push_str(&format!("{non_existant_id}\n"));
+                            html_message.push_str(&format!("{non_existant_id}\n"));
+                        }
+                        markdown_message.push_str("```\n\n");
+                        html_message.push_str("</pre>\n\n");
+                    }
+                    if !markdown_message.is_empty() {
+                        return Ok(RoomMessageEventContent::text_html(
+                            markdown_message,
+                            html_message,
+                        ));
+                    }
+
                     let mut deactivation_count = 0;
                     let mut admins = Vec::new();

@@ -806,7 +836,7 @@ impl Service {
                     .fetch_required_signing_keys(&value, &pub_key_map)
                     .await?;

-                let pub_key_map = pub_key_map.read().unwrap();
+                let pub_key_map = pub_key_map.read().await;
                 match ruma::signatures::verify_json(&pub_key_map, &value) {
                     Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
                     Err(e) => RoomMessageEventContent::text_plain(format!(

@@ -858,12 +888,15 @@ impl Service {
             .expect("Regex compilation should not fail");
         let text = re.replace_all(&text, "<code>$1</code>: $4");

-        // Look for a `[commandbody]` tag. If it exists, use all lines below it that
+        // Look for a `[commandbody]()` tag. If it exists, use all lines below it that
         // start with a `#` in the USAGE section.
         let mut text_lines: Vec<&str> = text.lines().collect();
         let mut command_body = String::new();

-        if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") {
+        if let Some(line_index) = text_lines
+            .iter()
+            .position(|line| *line == "[commandbody]()")
+        {
             text_lines.remove(line_index);

             while text_lines

@@ -919,7 +952,7 @@ impl Service {
                 .globals
                 .roomid_mutex_state
                 .write()
-                .unwrap()
+                .await
                 .entry(room_id.clone())
                 .or_default(),
         );

@@ -952,7 +985,10 @@ impl Service {
         content.room_version = room_version;

         // 1. The room create event
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomCreate,
                     content: to_raw_value(&content).expect("event is valid, we just created it"),

@@ -963,10 +999,14 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;

         // 2. Make conduit bot join
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomMember,
                     content: to_raw_value(&RoomMemberEventContent {

@@ -987,13 +1027,17 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;

         // 3. Power levels
         let mut users = BTreeMap::new();
         users.insert(conduit_user.clone(), 100.into());

-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomPowerLevels,
                     content: to_raw_value(&RoomPowerLevelsEventContent {

@@ -1008,10 +1052,14 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;

         // 4.1 Join Rules
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomJoinRules,
                     content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))

@@ -1023,10 +1071,14 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;

         // 4.2 History Visibility
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomHistoryVisibility,
                     content: to_raw_value(&RoomHistoryVisibilityEventContent::new(

@@ -1040,13 +1092,19 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;

         // 4.3 Guest Access
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomGuestAccess,
-                    content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
+                    content: to_raw_value(&RoomGuestAccessEventContent::new(
+                        GuestAccess::Forbidden,
+                    ))
                     .expect("event is valid, we just created it"),
                     unsigned: None,
                     state_key: Some("".to_owned()),

@@ -1055,14 +1113,18 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;

         // 5. Events implied by name and topic
         let room_name = format!("{} Admin Room", services().globals.server_name());
-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomName,
-                    content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
+                    content: to_raw_value(&RoomNameEventContent::new(room_name))
                         .expect("event is valid, we just created it"),
                     unsigned: None,
                     state_key: Some("".to_owned()),

@@ -1071,9 +1133,13 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;

-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomTopic,
                     content: to_raw_value(&RoomTopicEventContent {

@@ -1087,14 +1153,18 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;

         // 6. Room alias
         let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
             .try_into()
             .expect("#admins:server_name is a valid alias name");

-        services().rooms.timeline.build_and_append_pdu(
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
                 PduBuilder {
                     event_type: TimelineEventType::RoomCanonicalAlias,
                     content: to_raw_value(&RoomCanonicalAliasEventContent {

@@ -1109,13 +1179,29 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-        )?;
+            )
+            .await?;

         services().rooms.alias.set_alias(&alias, &room_id)?;

         Ok(())
     }

+    /// Gets the room ID of the admin room
+    ///
+    /// Errors are propagated from the database, and will have None if there is no admin room
+    pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
+        let admin_room_alias: Box<RoomAliasId> =
+            format!("#admins:{}", services().globals.server_name())
+                .try_into()
+                .expect("#admins:server_name is a valid alias name");
+
+        services()
+            .rooms
+            .alias
+            .resolve_local_alias(&admin_room_alias)
+    }
+
     /// Invite the user to the conduit admin room.
     ///
     /// In conduit, this is equivalent to granting admin privileges.

@@ -1124,22 +1210,13 @@ impl Service {
         user_id: &UserId,
         displayname: String,
     ) -> Result<()> {
-        let admin_room_alias: Box<RoomAliasId> =
-            format!("#admins:{}", services().globals.server_name())
-                .try_into()
-                .expect("#admins:server_name is a valid alias name");
-        let room_id = services()
-            .rooms
-            .alias
-            .resolve_local_alias(&admin_room_alias)?
-            .expect("Admin room must exist");
-
+        if let Some(room_id) = services().admin.get_admin_room()? {
             let mutex_state = Arc::clone(
                 services()
                     .globals
                     .roomid_mutex_state
                     .write()
-                .unwrap()
+                    .await
                     .entry(room_id.clone())
                     .or_default(),
             );

@@ -1151,7 +1228,10 @@ impl Service {
                 .expect("@conduit:server_name is valid");

             // Invite and join the real user
-            services().rooms.timeline.build_and_append_pdu(
+            services()
+                .rooms
+                .timeline
+                .build_and_append_pdu(
                     PduBuilder {
                         event_type: TimelineEventType::RoomMember,
                         content: to_raw_value(&RoomMemberEventContent {

@@ -1172,8 +1252,12 @@ impl Service {
                     &conduit_user,
                     &room_id,
                     &state_lock,
-            )?;
-            services().rooms.timeline.build_and_append_pdu(
+                )
+                .await?;
+            services()
+                .rooms
+                .timeline
+                .build_and_append_pdu(
                     PduBuilder {
                         event_type: TimelineEventType::RoomMember,
                         content: to_raw_value(&RoomMemberEventContent {

@@ -1194,14 +1278,18 @@ impl Service {
                     user_id,
                     &room_id,
                     &state_lock,
-            )?;
+                )
+                .await?;

             // Set power level
             let mut users = BTreeMap::new();
             users.insert(conduit_user.to_owned(), 100.into());
             users.insert(user_id.to_owned(), 100.into());

-            services().rooms.timeline.build_and_append_pdu(
+            services()
+                .rooms
+                .timeline
+                .build_and_append_pdu(
                     PduBuilder {
                         event_type: TimelineEventType::RoomPowerLevels,
                         content: to_raw_value(&RoomPowerLevelsEventContent {

@@ -1216,7 +1304,8 @@ impl Service {
                     &conduit_user,
                     &room_id,
                     &state_lock,
-            )?;
+                )
+                .await?;

             // Send welcome message
             services().rooms.timeline.build_and_append_pdu(

@@ -1234,8 +1323,8 @@ impl Service {
                 &conduit_user,
                 &room_id,
                 &state_lock,
-            )?;
-
+            ).await?;
+        }
         Ok(())
     }
 }
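The recurring shape behind the new `get_admin_room()` helper is `Result<Option<OwnedRoomId>>`: a database error, a missing admin room, and a found room are three distinct cases. A sketch of consuming that shape with stand-in types:

```rust
#[derive(Debug)]
struct DbError;

/// Stand-in for Service::get_admin_room(): Err = database failure,
/// Ok(None) = no admin room configured, Ok(Some(..)) = found.
fn get_admin_room() -> Result<Option<String>, DbError> {
    Ok(Some("!admins:example.org".to_owned()))
}

fn main() -> Result<(), DbError> {
    // `if let Ok(Some(..))` collapses "missing" and "errored" into "skip",
    // which is how the admin message loop guards itself above.
    if let Ok(Some(room_id)) = get_admin_room() {
        println!("sending admin messages to {room_id}");
    }

    // When errors should propagate instead, use `?` and match the Option.
    match get_admin_room()? {
        Some(room_id) => println!("admin room: {room_id}"),
        None => println!("no admin room"),
    }
    Ok(())
}
```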
@@ -1,8 +1,10 @@
+use ruma::api::appservice::Registration;
+
 use crate::Result;

 pub trait Data: Send + Sync {
     /// Registers an appservice and returns the ID to the caller
-    fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String>;
+    fn register_appservice(&self, yaml: Registration) -> Result<String>;

     /// Remove an appservice registration
     ///

@@ -11,9 +13,9 @@ pub trait Data: Send + Sync {
     /// * `service_name` - the name you send to register the service previously
     fn unregister_appservice(&self, service_name: &str) -> Result<()>;

-    fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>>;
+    fn get_registration(&self, id: &str) -> Result<Option<Registration>>;

     fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>>;

-    fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>>;
+    fn all(&self) -> Result<Vec<(String, Registration)>>;
 }
@@ -1,37 +1,184 @@
 mod data;

+use std::collections::BTreeMap;
+
 pub use data::Data;

-use crate::Result;
+use futures_util::Future;
+use regex::RegexSet;
+use ruma::api::appservice::{Namespace, Registration};
+use tokio::sync::RwLock;
+
+use crate::{services, Result};
+
+/// Compiled regular expressions for a namespace.
+#[derive(Clone, Debug)]
+pub struct NamespaceRegex {
+    pub exclusive: Option<RegexSet>,
+    pub non_exclusive: Option<RegexSet>,
+}
+
+impl NamespaceRegex {
+    /// Checks if this namespace has rights to a namespace
+    pub fn is_match(&self, heystack: &str) -> bool {
+        if self.is_exclusive_match(heystack) {
+            return true;
+        }
+
+        if let Some(non_exclusive) = &self.non_exclusive {
+            if non_exclusive.is_match(heystack) {
+                return true;
+            }
+        }
+        false
+    }
+
+    /// Checks if this namespace has exlusive rights to a namespace
+    pub fn is_exclusive_match(&self, heystack: &str) -> bool {
+        if let Some(exclusive) = &self.exclusive {
+            if exclusive.is_match(heystack) {
+                return true;
+            }
+        }
+        false
+    }
+}
+
+impl TryFrom<Vec<Namespace>> for NamespaceRegex {
+    fn try_from(value: Vec<Namespace>) -> Result<Self, regex::Error> {
+        let mut exclusive = vec![];
+        let mut non_exclusive = vec![];
+
+        for namespace in value {
+            if namespace.exclusive {
+                exclusive.push(namespace.regex);
+            } else {
+                non_exclusive.push(namespace.regex);
+            }
+        }
+
+        Ok(NamespaceRegex {
+            exclusive: if exclusive.is_empty() {
+                None
+            } else {
+                Some(RegexSet::new(exclusive)?)
+            },
+            non_exclusive: if non_exclusive.is_empty() {
+                None
+            } else {
+                Some(RegexSet::new(non_exclusive)?)
+            },
+        })
+    }
+
+    type Error = regex::Error;
+}
+
+/// Appservice registration combined with its compiled regular expressions.
+#[derive(Clone, Debug)]
+pub struct RegistrationInfo {
+    pub registration: Registration,
+    pub users: NamespaceRegex,
+    pub aliases: NamespaceRegex,
+    pub rooms: NamespaceRegex,
+}
+
+impl TryFrom<Registration> for RegistrationInfo {
+    fn try_from(value: Registration) -> Result<RegistrationInfo, regex::Error> {
+        Ok(RegistrationInfo {
+            users: value.namespaces.users.clone().try_into()?,
+            aliases: value.namespaces.aliases.clone().try_into()?,
+            rooms: value.namespaces.rooms.clone().try_into()?,
+            registration: value,
+        })
+    }
+
+    type Error = regex::Error;
+}

 pub struct Service {
 pub db: &'static dyn Data,
+registration_info: RwLock<BTreeMap<String, RegistrationInfo>>,
 }

 impl Service {
-/// Registers an appservice and returns the ID to the caller
-pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
+pub fn build(db: &'static dyn Data) -> Result<Self> {
+    let mut registration_info = BTreeMap::new();
+    // Inserting registrations into cache
+    for appservice in db.all()? {
+        registration_info.insert(
+            appservice.0,
+            appservice
+                .1
+                .try_into()
+                .expect("Should be validated on registration"),
+        );
+    }
+
+    Ok(Self {
+        db,
+        registration_info: RwLock::new(registration_info),
+    })
+}
+
+/// Registers an appservice and returns the ID to the caller.
+pub async fn register_appservice(&self, yaml: Registration) -> Result<String> {
+    services()
+        .appservice
+        .registration_info
+        .write()
+        .await
+        .insert(yaml.id.clone(), yaml.clone().try_into()?);
+
 self.db.register_appservice(yaml)
 }

-/// Remove an appservice registration
+/// Removes an appservice registration.
 ///
 /// # Arguments
 ///
 /// * `service_name` - the name you send to register the service previously
-pub fn unregister_appservice(&self, service_name: &str) -> Result<()> {
+pub async fn unregister_appservice(&self, service_name: &str) -> Result<()> {
+    services()
+        .appservice
+        .registration_info
+        .write()
+        .await
+        .remove(service_name)
+        .ok_or_else(|| crate::Error::AdminCommand("Appservice not found"))?;
+
 self.db.unregister_appservice(service_name)
 }

-pub fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
-    self.db.get_registration(id)
+pub async fn get_registration(&self, id: &str) -> Option<Registration> {
+    self.registration_info
+        .read()
+        .await
+        .get(id)
+        .cloned()
+        .map(|info| info.registration)
 }

-pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + '_> {
-    self.db.iter_ids()
+pub async fn iter_ids(&self) -> Vec<String> {
+    self.registration_info
+        .read()
+        .await
+        .keys()
+        .cloned()
+        .collect()
 }

-pub fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
-    self.db.all()
+pub async fn find_from_token(&self, token: &str) -> Option<RegistrationInfo> {
+    self.read()
+        .await
+        .values()
+        .find(|info| info.registration.as_token == token)
+        .cloned()
 }
+
+pub fn read(
+    &self,
+) -> impl Future<Output = tokio::sync::RwLockReadGuard<'_, BTreeMap<String, RegistrationInfo>>>
+{
+    self.registration_info.read()
+}
 }
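The core of the new cache is the `RegexSet`-backed namespace check. A condensed, standalone version of that logic (patterns invented; the source's `heystack` identifier is spelled `haystack` here):

    use regex::RegexSet;

    struct NamespaceRegex {
        exclusive: Option<RegexSet>,
        non_exclusive: Option<RegexSet>,
    }

    impl NamespaceRegex {
        // True only if an *exclusive* pattern claims the ID.
        fn is_exclusive_match(&self, haystack: &str) -> bool {
            self.exclusive.as_ref().map_or(false, |set| set.is_match(haystack))
        }

        // True if any pattern, exclusive or not, claims the ID.
        fn is_match(&self, haystack: &str) -> bool {
            self.is_exclusive_match(haystack)
                || self.non_exclusive.as_ref().map_or(false, |set| set.is_match(haystack))
        }
    }

    fn main() -> Result<(), regex::Error> {
        let ns = NamespaceRegex {
            exclusive: Some(RegexSet::new(["@_bridge_.*:example\\.org"])?),
            non_exclusive: None,
        };
        assert!(ns.is_match("@_bridge_alice:example.org"));
        assert!(!ns.is_exclusive_match("@bob:example.org"));
        Ok(())
    }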
@@ -8,6 +8,12 @@ use ruma::{
 use crate::api::server_server::FedDest;

 use crate::{services, Config, Error, Result};
+use futures_util::FutureExt;
+use hyper::{
+    client::connect::dns::{GaiResolver, Name},
+    service::Service as HyperService,
+};
+use reqwest::dns::{Addrs, Resolve, Resolving};
 use ruma::{
 api::{
 client::sync::sync_events,
@@ -17,17 +23,19 @@ use ruma::{
 };
 use std::{
 collections::{BTreeMap, HashMap},
+error::Error as StdError,
 fs,
-future::Future,
+future::{self, Future},
+iter,
 net::{IpAddr, SocketAddr},
 path::PathBuf,
 sync::{
 atomic::{self, AtomicBool},
-    Arc, Mutex, RwLock,
+    Arc, RwLock as StdRwLock,
 },
 time::{Duration, Instant},
 };
-use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore};
+use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore};
 use tracing::{error, info};
 use trust_dns_resolver::TokioAsyncResolver;

@@ -45,7 +53,7 @@ pub struct Service {
 pub db: &'static dyn Data,

 pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
-pub tls_name_override: Arc<RwLock<TlsNameMap>>,
+pub tls_name_override: Arc<StdRwLock<TlsNameMap>>,
 pub config: Config,
 keypair: Arc<ruma::signatures::Ed25519KeyPair>,
 dns_resolver: TokioAsyncResolver,
@@ -60,8 +68,8 @@ pub struct Service {
 pub servername_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, Arc<Semaphore>>>>,
 pub sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
 pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
-pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>,
-pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>, // this lock will be held longer
+pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
+pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
 pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
 pub stateres_mutex: Arc<Mutex<()>>,
 pub rotate: RotationHandler,
@@ -99,6 +107,45 @@ impl Default for RotationHandler {
 }
 }

+pub struct Resolver {
+    inner: GaiResolver,
+    overrides: Arc<StdRwLock<TlsNameMap>>,
+}
+
+impl Resolver {
+    pub fn new(overrides: Arc<StdRwLock<TlsNameMap>>) -> Self {
+        Resolver {
+            inner: GaiResolver::new(),
+            overrides,
+        }
+    }
+}
+
+impl Resolve for Resolver {
+    fn resolve(&self, name: Name) -> Resolving {
+        self.overrides
+            .read()
+            .unwrap()
+            .get(name.as_str())
+            .and_then(|(override_name, port)| {
+                override_name.first().map(|first_name| {
+                    let x: Box<dyn Iterator<Item = SocketAddr> + Send> =
+                        Box::new(iter::once(SocketAddr::new(*first_name, *port)));
+                    let x: Resolving = Box::pin(future::ready(Ok(x)));
+                    x
+                })
+            })
+            .unwrap_or_else(|| {
+                let this = &mut self.inner.clone();
+                Box::pin(HyperService::<Name>::call(this, name).map(|result| {
+                    result
+                        .map(|addrs| -> Addrs { Box::new(addrs) })
+                        .map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
+                }))
+            })
+    }
+}
+
 impl Service {
 pub fn load(db: &'static dyn Data, config: Config) -> Result<Self> {
 let keypair = db.load_keypair();
@@ -112,7 +159,7 @@ impl Service {
 }
 };

-let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new()));
+let tls_name_override = Arc::new(StdRwLock::new(TlsNameMap::new()));

 let jwt_decoding_key = config
 .jwt_secret
@@ -120,14 +167,8 @@ impl Service {
 .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()));

 let default_client = reqwest_client_builder(&config)?.build()?;
-let name_override = Arc::clone(&tls_name_override);
 let federation_client = reqwest_client_builder(&config)?
-    .resolve_fn(move |domain| {
-        let read_guard = name_override.read().unwrap();
-        let (override_name, port) = read_guard.get(&domain)?;
-        let first_name = override_name.get(0)?;
-        Some(SocketAddr::new(*first_name, *port))
-    })
+    .dns_resolver(Arc::new(Resolver::new(tls_name_override.clone())))
     .build()?;

 // Supported and stable room versions
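The `resolve_fn` hook is replaced by a full `reqwest::dns::Resolve` implementation, which the real code also backs with a fallback to the system resolver. A minimal standalone sketch of the same wiring, assuming reqwest 0.11's `dns_resolver` builder method; the override map is invented and the fallback path is omitted:

    use std::collections::HashMap;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
    use std::sync::{Arc, RwLock};

    use hyper::client::connect::dns::Name;
    use reqwest::dns::{Resolve, Resolving};

    struct OverrideResolver {
        overrides: Arc<RwLock<HashMap<String, SocketAddr>>>,
    }

    impl Resolve for OverrideResolver {
        fn resolve(&self, name: Name) -> Resolving {
            // Look up the override synchronously, then return a ready future.
            let addr = self.overrides.read().unwrap().get(name.as_str()).copied();
            Box::pin(async move {
                match addr {
                    Some(addr) => {
                        let addrs: reqwest::dns::Addrs = Box::new(std::iter::once(addr));
                        Ok(addrs)
                    }
                    None => Err("no override and no fallback resolver in this sketch".into()),
                }
            })
        }
    }

    fn main() -> Result<(), reqwest::Error> {
        let overrides = Arc::new(RwLock::new(HashMap::from([(
            "matrix.example.org".to_owned(),
            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8448),
        )])));
        let _client = reqwest::Client::builder()
            .dns_resolver(Arc::new(OverrideResolver { overrides }))
            .build()?;
        Ok(())
    }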
@@ -1,11 +1,13 @@
 use std::{
 collections::{BTreeMap, HashMap},
-sync::{Arc, Mutex},
+sync::{Arc, Mutex as StdMutex},
 };

 use lru_cache::LruCache;
+use tokio::sync::{broadcast, Mutex};

 use crate::{Config, Result};
+use tokio::sync::RwLock;

 pub mod account_data;
 pub mod admin;
@@ -55,7 +57,7 @@ impl Services {
 config: Config,
 ) -> Result<Self> {
 Ok(Self {
-appservice: appservice::Service { db },
+appservice: appservice::Service::build(db)?,
 pusher: pusher::Service { db },
 rooms: rooms::Service {
 alias: rooms::alias::Service { db },
@@ -64,7 +66,11 @@ impl Services {
 edus: rooms::edus::Service {
 presence: rooms::edus::presence::Service { db },
 read_receipt: rooms::edus::read_receipt::Service { db },
-typing: rooms::edus::typing::Service { db },
+typing: rooms::edus::typing::Service {
+    typing: RwLock::new(BTreeMap::new()),
+    last_typing_update: RwLock::new(BTreeMap::new()),
+    typing_update_sender: broadcast::channel(100).0,
+},
 },
 event_handler: rooms::event_handler::Service,
 lazy_loading: rooms::lazy_loading::Service {
@@ -79,17 +85,17 @@ impl Services {
 state: rooms::state::Service { db },
 state_accessor: rooms::state_accessor::Service {
 db,
-server_visibility_cache: Mutex::new(LruCache::new(
+server_visibility_cache: StdMutex::new(LruCache::new(
 (100.0 * config.conduit_cache_capacity_modifier) as usize,
 )),
-user_visibility_cache: Mutex::new(LruCache::new(
+user_visibility_cache: StdMutex::new(LruCache::new(
 (100.0 * config.conduit_cache_capacity_modifier) as usize,
 )),
 },
 state_cache: rooms::state_cache::Service { db },
 state_compressor: rooms::state_compressor::Service {
 db,
-stateinfo_cache: Mutex::new(LruCache::new(
+stateinfo_cache: StdMutex::new(LruCache::new(
 (100.0 * config.conduit_cache_capacity_modifier) as usize,
 )),
 },
@@ -107,7 +113,7 @@ impl Services {
 uiaa: uiaa::Service { db },
 users: users::Service {
 db,
-connections: Mutex::new(BTreeMap::new()),
+connections: StdMutex::new(BTreeMap::new()),
 },
 account_data: account_data::Service { db },
 admin: admin::Service::build(),
@@ -118,14 +124,8 @@ impl Services {
 globals: globals::Service::load(db, config)?,
 })
 }
-fn memory_usage(&self) -> String {
-    let lazy_load_waiting = self
-        .rooms
-        .lazy_loading
-        .lazy_load_waiting
-        .lock()
-        .unwrap()
-        .len();
+async fn memory_usage(&self) -> String {
+    let lazy_load_waiting = self.rooms.lazy_loading.lazy_load_waiting.lock().await.len();
 let server_visibility_cache = self
 .rooms
 .state_accessor
@@ -152,15 +152,9 @@ impl Services {
 .timeline
 .lasttimelinecount_cache
 .lock()
-.unwrap()
+.await
 .len();
-let roomid_spacechunk_cache = self
-    .rooms
-    .spaces
-    .roomid_spacechunk_cache
-    .lock()
-    .unwrap()
-    .len();
+let roomid_spacechunk_cache = self.rooms.spaces.roomid_spacechunk_cache.lock().await.len();

 format!(
 "\
@@ -173,13 +167,13 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
 "
 )
 }
-fn clear_caches(&self, amount: u32) {
+async fn clear_caches(&self, amount: u32) {
 if amount > 0 {
 self.rooms
 .lazy_loading
 .lazy_load_waiting
 .lock()
-.unwrap()
+.await
 .clear();
 }
 if amount > 1 {
@@ -211,7 +205,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
 .timeline
 .lasttimelinecount_cache
 .lock()
-.unwrap()
+.await
 .clear();
 }
 if amount > 5 {
@@ -219,7 +213,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
 .spaces
 .roomid_spacechunk_cache
 .lock()
-.unwrap()
+.await
 .clear();
 }
 }
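The renames above encode a convention: `StdMutex` keeps guarding short, synchronous critical sections (the LRU caches), while tokio's `Mutex`/`RwLock` is used wherever a guard may be held across an `.await`. A minimal illustration of the difference:

    use std::sync::Mutex as StdMutex;
    use tokio::sync::Mutex;

    #[tokio::main]
    async fn main() {
        let sync_cache = StdMutex::new(Vec::<u32>::new());
        let async_state = Mutex::new(Vec::<u32>::new());

        // std::sync::Mutex: lock() returns a Result; the guard must not be
        // held across an .await point.
        sync_cache.lock().unwrap().push(1);

        // tokio::sync::Mutex: lock() is a future; the guard may legally be
        // held across awaits.
        let mut guard = async_state.lock().await;
        guard.push(1);
        tokio::task::yield_now().await; // fine: async-aware lock
        drop(guard);
    }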
@@ -365,7 +365,7 @@ impl PartialEq for PduEvent {
 }
 impl PartialOrd for PduEvent {
 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-    self.event_id.partial_cmp(&other.event_id)
+    Some(self.cmp(other))
 }
 }
 impl Ord for PduEvent {
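This is the canonical fix for clippy's warning about `PartialOrd` implementations on `Ord` types: delegate to `cmp` so the two orderings can never disagree. Standalone illustration with a stand-in type:

    use std::cmp::Ordering;

    #[derive(PartialEq, Eq)]
    struct Event {
        event_id: String,
    }

    impl Ord for Event {
        fn cmp(&self, other: &Self) -> Ordering {
            self.event_id.cmp(&other.event_id)
        }
    }

    impl PartialOrd for Event {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other)) // delegate instead of comparing fields again
        }
    }

    fn main() {
        let a = Event { event_id: "$a".into() };
        let b = Event { event_id: "$b".into() };
        assert_eq!(a.partial_cmp(&b), Some(Ordering::Less));
    }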
@@ -1,6 +1,6 @@
 mod data;
 pub use data::Data;
-use ruma::events::AnySyncTimelineEvent;
+use ruma::{events::AnySyncTimelineEvent, push::PushConditionPowerLevelsCtx};

 use crate::{services, Error, PduEvent, Result};
 use bytes::BytesMut;
@@ -66,8 +66,7 @@ impl Service {
 })?
 .map(|body| body.freeze());

-let reqwest_request = reqwest::Request::try_from(http_request)
-    .expect("all http requests are valid reqwest requests");
+let reqwest_request = reqwest::Request::try_from(http_request)?;

 // TODO: we could keep this very short and let expo backoff do it's thing...
 //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5));
@@ -193,6 +192,12 @@ impl Service {
 pdu: &Raw<AnySyncTimelineEvent>,
 room_id: &RoomId,
 ) -> Result<&'a [Action]> {
+let power_levels = PushConditionPowerLevelsCtx {
+    users: power_levels.users.clone(),
+    users_default: power_levels.users_default,
+    notifications: power_levels.notifications.clone(),
+};
+
 let ctx = PushConditionRoomCtx {
 room_id: room_id.to_owned(),
 member_count: 10_u32.into(), // TODO: get member count efficiently
@@ -201,9 +206,7 @@ impl Service {
 .users
 .displayname(user)?
 .unwrap_or_else(|| user.localpart().to_owned()),
-users_power_levels: power_levels.users.clone(),
-default_power_level: power_levels.users_default,
-notification_power_levels: power_levels.notifications.clone(),
+power_levels: Some(power_levels),
 };

 Ok(ruleset.get_actions(pdu, &ctx))
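The hunk tracks a ruma API change: the three flat power-level fields on `PushConditionRoomCtx` moved into an optional `PushConditionPowerLevelsCtx`. The sketch below mirrors that shape with stand-in types rather than ruma's real definitions:

    use std::collections::BTreeMap;

    // Stand-in for ruma's PushConditionPowerLevelsCtx.
    struct PushConditionPowerLevelsCtx {
        users: BTreeMap<String, i64>,
        users_default: i64,
        notifications: BTreeMap<String, i64>,
    }

    // Stand-in for ruma's PushConditionRoomCtx.
    struct PushConditionRoomCtx {
        room_id: String,
        member_count: u64,
        power_levels: Option<PushConditionPowerLevelsCtx>, // was three flat fields
    }

    fn main() {
        let power_levels = PushConditionPowerLevelsCtx {
            users: BTreeMap::from([("@admin:example.org".to_owned(), 100)]),
            users_default: 0,
            notifications: BTreeMap::new(),
        };
        let ctx = PushConditionRoomCtx {
            room_id: "!room:example.org".to_owned(),
            member_count: 10,
            power_levels: Some(power_levels),
        };
        assert!(ctx.power_levels.is_some());
    }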
@@ -2,7 +2,7 @@ pub mod presence;
 pub mod read_receipt;
 pub mod typing;

-pub trait Data: presence::Data + read_receipt::Data + typing::Data + 'static {}
+pub trait Data: presence::Data + read_receipt::Data + 'static {}

 pub struct Service {
 pub presence: presence::Service,
@@ -17,29 +17,32 @@ impl Service {
 /// make sure users outside these rooms can't see them.
 pub fn update_presence(
 &self,
-user_id: &UserId,
-room_id: &RoomId,
-presence: PresenceEvent,
+_user_id: &UserId,
+_room_id: &RoomId,
+_presence: PresenceEvent,
 ) -> Result<()> {
-self.db.update_presence(user_id, room_id, presence)
+// self.db.update_presence(user_id, room_id, presence)
+Ok(())
 }

 /// Resets the presence timeout, so the user will stay in their current presence state.
-pub fn ping_presence(&self, user_id: &UserId) -> Result<()> {
-    self.db.ping_presence(user_id)
+pub fn ping_presence(&self, _user_id: &UserId) -> Result<()> {
+    // self.db.ping_presence(user_id)
+    Ok(())
 }

 pub fn get_last_presence_event(
 &self,
-user_id: &UserId,
-room_id: &RoomId,
+_user_id: &UserId,
+_room_id: &RoomId,
 ) -> Result<Option<PresenceEvent>> {
-let last_update = match self.db.last_presence_update(user_id)? {
-    Some(last) => last,
-    None => return Ok(None),
-};
+// let last_update = match self.db.last_presence_update(user_id)? {
+//     Some(last) => last,
+//     None => return Ok(None),
+// };

-self.db.get_presence_event(room_id, user_id, last_update)
+// self.db.get_presence_event(room_id, user_id, last_update)
+Ok(None)
 }

 /* TODO
@@ -111,12 +114,12 @@ impl Service {
 }*/

 /// Returns the most recent presence updates that happened after the event with id `since`.
 #[tracing::instrument(skip(self, since, room_id))]
 pub fn presence_since(
 &self,
-room_id: &RoomId,
-since: u64,
+_room_id: &RoomId,
+_since: u64,
 ) -> Result<HashMap<OwnedUserId, PresenceEvent>> {
-self.db.presence_since(room_id, since)
+// self.db.presence_since(room_id, since)
+Ok(HashMap::new())
 }
 }
@@ -11,6 +11,7 @@ pub trait Data: Send + Sync {
 ) -> Result<()>;

 /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
+#[allow(clippy::type_complexity)]
 fn readreceipts_since<'a>(
 &'a self,
 room_id: &RoomId,
@@ -1,21 +0,0 @@
-use crate::Result;
-use ruma::{OwnedUserId, RoomId, UserId};
-use std::collections::HashSet;
-
-pub trait Data: Send + Sync {
-    /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
-    /// called.
-    fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>;
-
-    /// Removes a user from typing before the timeout is reached.
-    fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>;
-
-    /// Makes sure that typing events with old timestamps get removed.
-    fn typings_maintain(&self, room_id: &RoomId) -> Result<()>;
-
-    /// Returns the count of the last typing update in this room.
-    fn last_typing_update(&self, room_id: &RoomId) -> Result<u64>;
-
-    /// Returns all user ids currently typing.
-    fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>>;
-}
@@ -1,48 +1,117 @@
-mod data;
+use ruma::{events::SyncEphemeralRoomEvent, OwnedRoomId, OwnedUserId, RoomId, UserId};
+use std::collections::BTreeMap;
+use tokio::sync::{broadcast, RwLock};

-pub use data::Data;
-use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId};
-
-use crate::Result;
+use crate::{services, utils, Result};

 pub struct Service {
-pub db: &'static dyn Data,
+pub typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>, // u64 is unix timestamp of timeout
+pub last_typing_update: RwLock<BTreeMap<OwnedRoomId, u64>>, // timestamp of the last change to typing users
+pub typing_update_sender: broadcast::Sender<OwnedRoomId>,
 }

 impl Service {
 /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
 /// called.
-pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
-    self.db.typing_add(user_id, room_id, timeout)
+pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
+    self.typing
+        .write()
+        .await
+        .entry(room_id.to_owned())
+        .or_default()
+        .insert(user_id.to_owned(), timeout);
+    self.last_typing_update
+        .write()
+        .await
+        .insert(room_id.to_owned(), services().globals.next_count()?);
+    let _ = self.typing_update_sender.send(room_id.to_owned());
+    Ok(())
 }

 /// Removes a user from typing before the timeout is reached.
-pub fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
-    self.db.typing_remove(user_id, room_id)
+pub async fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
+    self.typing
+        .write()
+        .await
+        .entry(room_id.to_owned())
+        .or_default()
+        .remove(user_id);
+    self.last_typing_update
+        .write()
+        .await
+        .insert(room_id.to_owned(), services().globals.next_count()?);
+    let _ = self.typing_update_sender.send(room_id.to_owned());
+    Ok(())
+}
+
+pub async fn wait_for_update(&self, room_id: &RoomId) -> Result<()> {
+    let mut receiver = self.typing_update_sender.subscribe();
+    while let Ok(next) = receiver.recv().await {
+        if next == room_id {
+            break;
+        }
+    }
+
+    Ok(())
 }

 /// Makes sure that typing events with old timestamps get removed.
-fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
-    self.db.typings_maintain(room_id)
+async fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
+    let current_timestamp = utils::millis_since_unix_epoch();
+    let mut removable = Vec::new();
+    {
+        let typing = self.typing.read().await;
+        let Some(room) = typing.get(room_id) else {
+            return Ok(());
+        };
+        for (user, timeout) in room {
+            if *timeout < current_timestamp {
+                removable.push(user.clone());
+            }
+        }
+        drop(typing);
+    }
+    if !removable.is_empty() {
+        let typing = &mut self.typing.write().await;
+        let room = typing.entry(room_id.to_owned()).or_default();
+        for user in removable {
+            room.remove(&user);
+        }
+        self.last_typing_update
+            .write()
+            .await
+            .insert(room_id.to_owned(), services().globals.next_count()?);
+        let _ = self.typing_update_sender.send(room_id.to_owned());
+    }
+    Ok(())
 }

 /// Returns the count of the last typing update in this room.
-pub fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
-    self.typings_maintain(room_id)?;
-
-    self.db.last_typing_update(room_id)
+pub async fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
+    self.typings_maintain(room_id).await?;
+    Ok(self
+        .last_typing_update
+        .read()
+        .await
+        .get(room_id)
+        .copied()
+        .unwrap_or(0))
 }

 /// Returns a new typing EDU.
-pub fn typings_all(
+pub async fn typings_all(
 &self,
 room_id: &RoomId,
 ) -> Result<SyncEphemeralRoomEvent<ruma::events::typing::TypingEventContent>> {
-let user_ids = self.db.typings_all(room_id)?;
-
 Ok(SyncEphemeralRoomEvent {
 content: ruma::events::typing::TypingEventContent {
-user_ids: user_ids.into_iter().collect(),
+user_ids: self
+    .typing
+    .read()
+    .await
+    .get(room_id)
+    .map(|m| m.keys().cloned().collect())
+    .unwrap_or_default(),
 },
 })
 }
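The typing service drops its database trait entirely and keeps state in two in-memory maps plus a broadcast channel that wakes sync waiters. A condensed, runnable version of that design (room and user IDs are plain strings here, the timeout value invented):

    use std::collections::BTreeMap;
    use tokio::sync::{broadcast, RwLock};

    struct TypingStore {
        // room -> (user -> timeout timestamp)
        typing: RwLock<BTreeMap<String, BTreeMap<String, u64>>>,
        update_sender: broadcast::Sender<String>,
    }

    impl TypingStore {
        async fn typing_add(&self, user: &str, room: &str, timeout: u64) {
            self.typing
                .write()
                .await
                .entry(room.to_owned())
                .or_default()
                .insert(user.to_owned(), timeout);
            // A send error only means there are no subscribers right now.
            let _ = self.update_sender.send(room.to_owned());
        }
    }

    #[tokio::main]
    async fn main() {
        let store = TypingStore {
            typing: RwLock::new(BTreeMap::new()),
            update_sender: broadcast::channel(16).0,
        };
        // Subscribe before updating, as `wait_for_update` does in the service.
        let mut updates = store.update_sender.subscribe();
        store.typing_add("@alice:example.org", "!a:example.org", 30_000).await;
        assert_eq!(updates.recv().await.unwrap(), "!a:example.org");
    }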
@@ -1,25 +1,23 @@
 /// An async function that can recursively call itself.
 type AsyncRecursiveType<'a, T> = Pin<Box<dyn Future<Output = T> + 'a + Send>>;

-use ruma::{
-    api::federation::discovery::{get_remote_server_keys, get_server_keys},
-    CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, OwnedServerSigningKeyId,
-    RoomVersionId,
-};
 use std::{
 collections::{hash_map, BTreeMap, HashMap, HashSet},
 pin::Pin,
-sync::{Arc, RwLock, RwLockWriteGuard},
+sync::Arc,
 time::{Duration, Instant, SystemTime},
 };
-use tokio::sync::Semaphore;

 use futures_util::{stream::FuturesUnordered, Future, StreamExt};
+use ruma::{
 api::{
 client::error::ErrorKind,
 federation::{
-discovery::get_remote_server_keys_batch::{self, v2::QueryCriteria},
+discovery::{
+    get_remote_server_keys,
+    get_remote_server_keys_batch::{self, v2::QueryCriteria},
+    get_server_keys,
+},
 event::{get_event, get_room_state_ids},
 membership::create_join_event,
 },
@@ -31,9 +29,11 @@ use ruma::{
 int,
 serde::Base64,
 state_res::{self, RoomVersion, StateMap},
-uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName,
+uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
+OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName,
 };
 use serde_json::value::RawValue as RawJsonValue;
+use tokio::sync::{RwLock, RwLockWriteGuard, Semaphore};
 use tracing::{debug, error, info, trace, warn};

 use crate::{service::*, services, Error, PduEvent, Result};
@@ -92,7 +92,7 @@ impl Service {
 ));
 }

-services().rooms.event_handler.acl_check(origin, &room_id)?;
+services().rooms.event_handler.acl_check(origin, room_id)?;

 // 1. Skip the PDU if we already have it as a timeline event
 if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
@@ -168,7 +168,7 @@ impl Service {
 .globals
 .bad_event_ratelimiter
 .read()
-.unwrap()
+.await
 .get(&*prev_id)
 {
 // Exponential backoff
@@ -184,7 +184,22 @@ impl Service {
 }

 if errors >= 5 {
-    break;
+    // Timeout other events
+    match services()
+        .globals
+        .bad_event_ratelimiter
+        .write()
+        .await
+        .entry((*prev_id).to_owned())
+    {
+        hash_map::Entry::Vacant(e) => {
+            e.insert((Instant::now(), 1));
+        }
+        hash_map::Entry::Occupied(mut e) => {
+            *e.get_mut() = (Instant::now(), e.get().1 + 1)
+        }
+    }
+    continue;
 }

 if let Some((pdu, json)) = eventid_info.remove(&*prev_id) {
@@ -198,7 +213,7 @@ impl Service {
 .globals
 .roomid_federationhandletime
 .write()
-.unwrap()
+.await
 .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));

 if let Err(e) = self
@@ -218,7 +233,7 @@ impl Service {
 .globals
 .bad_event_ratelimiter
 .write()
-.unwrap()
+.await
 .entry((*prev_id).to_owned())
 {
 hash_map::Entry::Vacant(e) => {
@@ -234,7 +249,7 @@ impl Service {
 .globals
 .roomid_federationhandletime
 .write()
-.unwrap()
+.await
 .remove(&room_id.to_owned());
 debug!(
 "Handling prev event {} took {}m{}s",
@@ -252,7 +267,7 @@ impl Service {
 .globals
 .roomid_federationhandletime
 .write()
-.unwrap()
+.await
 .insert(room_id.to_owned(), (event_id.to_owned(), start_time));
 let r = services()
 .rooms
@@ -270,12 +285,13 @@ impl Service {
 .globals
 .roomid_federationhandletime
 .write()
-.unwrap()
+.await
 .remove(&room_id.to_owned());

 r
 }

+#[allow(clippy::type_complexity, clippy::too_many_arguments)]
 #[tracing::instrument(skip(self, create_event, value, pub_key_map))]
 fn handle_outlier_pdu<'a>(
 &'a self,
@@ -310,11 +326,8 @@ impl Service {
 let room_version =
 RoomVersion::new(room_version_id).expect("room version is supported");

-let mut val = match ruma::signatures::verify_event(
-    &pub_key_map.read().expect("RwLock is poisoned."),
-    &value,
-    room_version_id,
-) {
+let guard = pub_key_map.read().await;
+let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) {
 Err(e) => {
 // Drop
 warn!("Dropping bad event {}: {}", event_id, e,);
@@ -349,6 +362,8 @@ impl Service {
 Ok(ruma::signatures::Verified::All) => value,
 };

+drop(guard);
+
 // Now that we have checked the signature and hashes we can add the eventID and convert
 // to our PduEvent type
 val.insert(
@@ -676,13 +691,15 @@ impl Service {
 {
 Ok(res) => {
 debug!("Fetching state events at event.");
+let collect = res
+    .pdu_ids
+    .iter()
+    .map(|x| Arc::from(&**x))
+    .collect::<Vec<_>>();
 let state_vec = self
 .fetch_and_handle_outliers(
 origin,
-&res.pdu_ids
-    .iter()
-    .map(|x| Arc::from(&**x))
-    .collect::<Vec<_>>(),
+&collect,
 create_event,
 room_id,
 room_version_id,
@@ -789,7 +806,7 @@ impl Service {
 .globals
 .roomid_mutex_state
 .write()
-.unwrap()
+.await
 .entry(room_id.to_owned())
 .or_default(),
 );
@@ -868,14 +885,18 @@ impl Service {
 debug!("Starting soft fail auth check");

 if soft_fail {
-services().rooms.timeline.append_incoming_pdu(
+services()
+    .rooms
+    .timeline
+    .append_incoming_pdu(
 &incoming_pdu,
 val,
 extremities.iter().map(|e| (**e).to_owned()).collect(),
 state_ids_compressed,
 soft_fail,
 &state_lock,
-)?;
+)
+.await?;

 // Soft fail, we keep the event as an outlier but don't add it to the timeline
 warn!("Event was soft failed: {:?}", incoming_pdu);
@@ -896,14 +917,18 @@ impl Service {
 // We use the `state_at_event` instead of `state_after` so we accurately
 // represent the state for this event.

-let pdu_id = services().rooms.timeline.append_incoming_pdu(
+let pdu_id = services()
+    .rooms
+    .timeline
+    .append_incoming_pdu(
 &incoming_pdu,
 val,
 extremities.iter().map(|e| (**e).to_owned()).collect(),
 state_ids_compressed,
 soft_fail,
 &state_lock,
-)?;
+)
+.await?;

 debug!("Appended incoming pdu");

@@ -965,14 +990,21 @@ impl Service {

 debug!("Resolving state");

-let lock = services().globals.stateres_mutex.lock();
-let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
+let fetch_event = |id: &_| {
 let res = services().rooms.timeline.get_pdu(id);
 if let Err(e) = &res {
 error!("LOOK AT ME Failed to fetch event: {}", e);
 }
 res.ok().flatten()
-}) {
+};
+
+let lock = services().globals.stateres_mutex.lock();
+let state = match state_res::resolve(
+    room_version_id,
+    &fork_states,
+    auth_chain_sets,
+    fetch_event,
+) {
 Ok(new_state) => new_state,
 Err(_) => {
 return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));
@@ -1009,6 +1041,7 @@ impl Service {
 /// b. Look at outlier pdu tree
 /// c. Ask origin server over federation
 /// d. TODO: Ask other servers over federation?
+#[allow(clippy::type_complexity)]
 #[tracing::instrument(skip_all)]
 pub(crate) fn fetch_and_handle_outliers<'a>(
 &'a self,
@@ -1021,17 +1054,21 @@ impl Service {
 ) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
 {
 Box::pin(async move {
-let back_off = |id| match services()
+let back_off = |id| async move {
+    match services()
 .globals
 .bad_event_ratelimiter
 .write()
-.unwrap()
+.await
 .entry(id)
 {
 hash_map::Entry::Vacant(e) => {
 e.insert((Instant::now(), 1));
 }
-hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+hash_map::Entry::Occupied(mut e) => {
+    *e.get_mut() = (Instant::now(), e.get().1 + 1)
+}
 }
 };

 let mut pdus = vec![];
@@ -1057,7 +1094,7 @@ impl Service {
 .globals
 .bad_event_ratelimiter
 .read()
-.unwrap()
+.await
 .get(&*next_id)
 {
 // Exponential backoff
@@ -1104,7 +1141,7 @@ impl Service {
 match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) {
 Ok(t) => t,
 Err(_) => {
-    back_off((*next_id).to_owned());
+    back_off((*next_id).to_owned()).await;
 continue;
 }
 };
@@ -1136,7 +1173,7 @@ impl Service {
 }
 Err(_) => {
 warn!("Failed to fetch event: {}", next_id);
-back_off((*next_id).to_owned());
+back_off((*next_id).to_owned()).await;
 }
 }
 }
@@ -1146,7 +1183,7 @@ impl Service {
 .globals
 .bad_event_ratelimiter
 .read()
-.unwrap()
+.await
 .get(&**next_id)
 {
 // Exponential backoff
@@ -1181,7 +1218,7 @@ impl Service {
 }
 Err(e) => {
 warn!("Authentication of event {} failed: {:?}", next_id, e);
-back_off((**next_id).to_owned());
+back_off((**next_id).to_owned()).await;
 }
 }
 }
@@ -1336,7 +1373,7 @@ impl Service {

 pub_key_map
 .write()
-.map_err(|_| Error::bad_database("RwLock is poisoned."))?
+.await
 .insert(signature_server.clone(), keys);
 }

@@ -1345,7 +1382,7 @@ impl Service {

 // Gets a list of servers for which we don't have the signing key yet. We go over
 // the PDUs and either cache the key or add it to the list that needs to be retrieved.
-fn get_server_keys_from_cache(
+async fn get_server_keys_from_cache(
 &self,
 pdu: &RawJsonValue,
 servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
@@ -1369,7 +1406,7 @@ impl Service {
 .globals
 .bad_event_ratelimiter
 .read()
-.unwrap()
+.await
 .get(event_id)
 {
 // Exponential backoff
@@ -1445,17 +1482,19 @@ impl Service {
 > = BTreeMap::new();

 {
-let mut pkm = pub_key_map
-    .write()
-    .map_err(|_| Error::bad_database("RwLock is poisoned."))?;
+let mut pkm = pub_key_map.write().await;

 // Try to fetch keys, failure is okay
 // Servers we couldn't find in the cache will be added to `servers`
 for pdu in &event.room_state.state {
-    let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
+    let _ = self
+        .get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
+        .await;
 }
 for pdu in &event.room_state.auth_chain {
-    let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
+    let _ = self
+        .get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
+        .await;
 }

 drop(pkm);
@@ -1479,9 +1518,7 @@ impl Service {
 .await
 {
 trace!("Got signing keys: {:?}", keys);
-let mut pkm = pub_key_map
-    .write()
-    .map_err(|_| Error::bad_database("RwLock is poisoned."))?;
+let mut pkm = pub_key_map.write().await;
 for k in keys.server_keys {
 let k = match k.deserialize() {
 Ok(key) => key,
@@ -1540,10 +1577,7 @@ impl Service {
 .into_iter()
 .map(|(k, v)| (k.to_string(), v.key))
 .collect();
-pub_key_map
-    .write()
-    .map_err(|_| Error::bad_database("RwLock is poisoned."))?
-    .insert(origin.to_string(), result);
+pub_key_map.write().await.insert(origin.to_string(), result);
 }
 }
 info!("Done handling result");
@@ -1608,14 +1642,14 @@ impl Service {
 .globals
 .servername_ratelimiter
 .read()
-.unwrap()
+.await
 .get(origin)
 .map(|s| Arc::clone(s).acquire_owned());

 let permit = match permit {
 Some(p) => p,
 None => {
-let mut write = services().globals.servername_ratelimiter.write().unwrap();
+let mut write = services().globals.servername_ratelimiter.write().await;
 let s = Arc::clone(
 write
 .entry(origin.to_owned())
@@ -1627,24 +1661,26 @@ impl Service {
 }
 .await;

-let back_off = |id| match services()
+let back_off = |id| async {
+    match services()
 .globals
 .bad_signature_ratelimiter
 .write()
-.unwrap()
+.await
 .entry(id)
 {
 hash_map::Entry::Vacant(e) => {
 e.insert((Instant::now(), 1));
 }
 hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
 }
 };

 if let Some((time, tries)) = services()
 .globals
 .bad_signature_ratelimiter
 .read()
-.unwrap()
+.await
 .get(&signature_ids)
 {
 // Exponential backoff
@@ -1751,7 +1787,7 @@ impl Service {

 drop(permit);

-back_off(signature_ids);
+back_off(signature_ids).await;

 warn!("Failed to find public key for server: {}", origin);
 Err(Error::BadServerResponse(
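A recurring detail in this file: `back_off` cannot stay a plain closure once the ratelimiter map sits behind tokio's `RwLock`, because locking is now an await point. The closure returns an async block instead, and every call site gains `.await`. A minimal standalone sketch of the pattern:

    use std::collections::HashMap;
    use std::time::Instant;
    use tokio::sync::RwLock;

    #[tokio::main]
    async fn main() {
        let ratelimiter: RwLock<HashMap<String, (Instant, u32)>> = RwLock::new(HashMap::new());

        // A closure that returns a future; each call must be awaited.
        let back_off = |id: String| async move {
            let mut map = ratelimiter.write().await; // await inside the async block
            map.entry(id)
                .and_modify(|(when, tries)| {
                    *when = Instant::now();
                    *tries += 1;
                })
                .or_insert((Instant::now(), 1));
        };

        back_off("$event_a".to_owned()).await;
        back_off("$event_a".to_owned()).await;
        assert_eq!(ratelimiter.read().await.get("$event_a").unwrap().1, 2);
    }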
@@ -1,11 +1,9 @@
 mod data;
-use std::{
-    collections::{HashMap, HashSet},
-    sync::Mutex,
-};
+use std::collections::{HashMap, HashSet};

 pub use data::Data;
 use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId};
+use tokio::sync::Mutex;

 use crate::Result;

@@ -14,6 +12,7 @@ use super::timeline::PduCount;
 pub struct Service {
 pub db: &'static dyn Data,

+#[allow(clippy::type_complexity)]
 pub lazy_load_waiting:
 Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
 }
@@ -32,7 +31,7 @@ impl Service {
 }

 #[tracing::instrument(skip(self))]
-pub fn lazy_load_mark_sent(
+pub async fn lazy_load_mark_sent(
 &self,
 user_id: &UserId,
 device_id: &DeviceId,
@@ -40,7 +39,7 @@ impl Service {
 lazy_load: HashSet<OwnedUserId>,
 count: PduCount,
 ) {
-self.lazy_load_waiting.lock().unwrap().insert(
+self.lazy_load_waiting.lock().await.insert(
 (
 user_id.to_owned(),
 device_id.to_owned(),
@@ -52,14 +51,14 @@ impl Service {
 }

 #[tracing::instrument(skip(self))]
-pub fn lazy_load_confirm_delivery(
+pub async fn lazy_load_confirm_delivery(
 &self,
 user_id: &UserId,
 device_id: &DeviceId,
 room_id: &RoomId,
 since: PduCount,
 ) -> Result<()> {
-if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&(
+if let Some(user_ids) = self.lazy_load_waiting.lock().await.remove(&(
 user_id.to_owned(),
 device_id.to_owned(),
 room_id.to_owned(),
@@ -5,6 +5,7 @@ use ruma::{EventId, RoomId, UserId};

 pub trait Data: Send + Sync {
 fn add_relation(&self, from: u64, to: u64) -> Result<()>;
+#[allow(clippy::type_complexity)]
 fn relations_until<'a>(
 &'a self,
 user_id: &'a UserId,
@@ -40,6 +40,7 @@ impl Service {
 }
 }

+#[allow(clippy::too_many_arguments)]
 pub fn paginate_relations_with_filter(
 &self,
 sender_user: &UserId,
@@ -82,7 +83,7 @@ impl Service {
 services()
 .rooms
 .state_accessor
-.user_can_see_event(sender_user, &room_id, &pdu.event_id)
+.user_can_see_event(sender_user, room_id, &pdu.event_id)
 .unwrap_or(false)
 })
 .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
@@ -106,7 +107,7 @@ impl Service {
 let events_before: Vec<_> = services()
 .rooms
 .pdu_metadata
-.relations_until(sender_user, &room_id, target, from)?
+.relations_until(sender_user, room_id, target, from)?
 .filter(|r| {
 r.as_ref().map_or(true, |(_, pdu)| {
 filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
@@ -129,7 +130,7 @@ impl Service {
 services()
 .rooms
 .state_accessor
-.user_can_see_event(sender_user, &room_id, &pdu.event_id)
+.user_can_see_event(sender_user, room_id, &pdu.event_id)
 .unwrap_or(false)
 })
 .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
@@ -4,6 +4,7 @@ use ruma::RoomId;
 pub trait Data: Send + Sync {
 fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;

+#[allow(clippy::type_complexity)]
 fn search_pdus<'a>(
 &'a self,
 room_id: &RoomId,
@@ -1,4 +1,4 @@
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;

 use lru_cache::LruCache;
 use ruma::{
@@ -25,6 +25,7 @@ use ruma::{
 space::SpaceRoomJoinRule,
 OwnedRoomId, RoomId, UserId,
 };
+use tokio::sync::Mutex;

 use tracing::{debug, error, warn};

@@ -79,7 +80,7 @@ impl Service {
 if let Some(cached) = self
 .roomid_spacechunk_cache
 .lock()
-.unwrap()
+.await
 .get_mut(&current_room.to_owned())
 .as_ref()
 {
@@ -134,7 +135,7 @@ impl Service {

 if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
 .ok()
-.and_then(|c| c.via)
+.map(|c| c.via)
 .map_or(true, |v| v.is_empty())
 {
 continue;
@@ -171,7 +172,7 @@ impl Service {
 .transpose()?
 .unwrap_or(JoinRule::Invite);

-self.roomid_spacechunk_cache.lock().unwrap().insert(
+self.roomid_spacechunk_cache.lock().await.insert(
 current_room.clone(),
 Some(CachedSpaceChunk {
 chunk,
@@ -185,7 +186,9 @@ impl Service {
 stack.push(children_ids);
 }
 } else {
-let server = current_room.server_name();
+let server = current_room
+    .server_name()
+    .expect("Room IDs should always have a server name");
 if server == services().globals.server_name() {
 continue;
 }
@@ -193,11 +196,11 @@ impl Service {
 // Early return so the client can see some data already
 break;
 }
-warn!("Asking {server} for /hierarchy");
+debug!("Asking {server} for /hierarchy");
 if let Ok(response) = services()
 .sending
 .send_federation_request(
-&server,
+server,
 federation::space::get_hierarchy::v1::Request {
 room_id: current_room.to_owned(),
 suggested_only,
@@ -235,7 +238,7 @@ impl Service {
 .room
 .allowed_room_ids
 .into_iter()
-.map(|room| AllowRule::room_membership(room))
+.map(AllowRule::room_membership)
 .collect(),
 })
 }
@@ -245,7 +248,7 @@ impl Service {
 .room
 .allowed_room_ids
 .into_iter()
-.map(|room| AllowRule::room_membership(room))
+.map(AllowRule::room_membership)
 .collect(),
 })
 }
@@ -263,7 +266,7 @@ impl Service {
 }
 }

-self.roomid_spacechunk_cache.lock().unwrap().insert(
+self.roomid_spacechunk_cache.lock().await.insert(
 current_room.clone(),
 Some(CachedSpaceChunk {
 chunk,
@@ -287,7 +290,7 @@ impl Service {
 } else {
 self.roomid_spacechunk_cache
 .lock()
-.unwrap()
+.await
 .insert(current_room.clone(), None);
 }
 }
@@ -313,7 +316,7 @@ impl Service {
 canonical_alias: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
+.room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")?
 .map_or(Ok(None), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomCanonicalAliasEventContent| c.alias)
@@ -321,11 +324,11 @@ impl Service {
 Error::bad_database("Invalid canonical alias event in database.")
 })
 })?,
-name: services().rooms.state_accessor.get_name(&room_id)?,
+name: services().rooms.state_accessor.get_name(room_id)?,
 num_joined_members: services()
 .rooms
 .state_cache
-.room_joined_count(&room_id)?
+.room_joined_count(room_id)?
 .unwrap_or_else(|| {
 warn!("Room {} has no member count", room_id);
 0
@@ -336,7 +339,7 @@ impl Service {
 topic: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
+.room_state_get(room_id, &StateEventType::RoomTopic, "")?
 .map_or(Ok(None), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomTopicEventContent| Some(c.topic))
@@ -348,7 +351,7 @@ impl Service {
 world_readable: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
 .map_or(Ok(false), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomHistoryVisibilityEventContent| {
@@ -363,7 +366,7 @@ impl Service {
 guest_can_join: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
+.room_state_get(room_id, &StateEventType::RoomGuestAccess, "")?
 .map_or(Ok(false), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomGuestAccessEventContent| {
@@ -376,7 +379,7 @@ impl Service {
 avatar_url: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
+.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
 .map(|s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomAvatarEventContent| c.url)
@@ -389,7 +392,7 @@ impl Service {
 let join_rule = services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
+.room_state_get(room_id, &StateEventType::RoomJoinRules, "")?
 .map(|s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomJoinRulesEventContent| c.join_rule)
@@ -415,7 +418,7 @@ impl Service {
 room_type: services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
+.room_state_get(room_id, &StateEventType::RoomCreate, "")?
 .map(|s| {
 serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(|e| {
 error!("Invalid room create event in database: {}", e);
@@ -455,7 +458,7 @@ impl Service {
 SpaceRoomJoinRule::Invite => services()
 .rooms
 .state_cache
-.is_joined(sender_user, &room_id)?,
+.is_joined(sender_user, room_id)?,
 _ => false,
 };

@@ -479,8 +482,7 @@ impl Service {
 match join_rule {
 JoinRule::Restricted(r) => {
 for rule in &r.allow {
-match rule {
-    join_rules::AllowRule::RoomMembership(rm) => {
+if let join_rules::AllowRule::RoomMembership(rm) = rule {
 if let Ok(true) = services()
 .rooms
 .state_cache
@@ -489,8 +491,6 @@ impl Service {
 return Ok(true);
 }
 }
-    _ => {}
-}
 }

 Ok(false)
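The `.expect(...)` added above exists because ruma's `RoomId::server_name()` now returns an `Option`. A small sketch of the API, assuming a ruma version with the `Option`-returning signature:

    use ruma::RoomId;

    fn main() {
        let room_id = <&RoomId>::try_from("!room:example.org").expect("valid room ID");
        // Handling the None arm explicitly is the alternative to .expect(...).
        match room_id.server_name() {
            Some(server) => println!("route /hierarchy request to {server}"),
            None => println!("room ID carries no routable server name"),
        }
    }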
@@ -41,7 +41,7 @@ impl Service {
 services()
 .rooms
 .state_compressor
-.parse_compressed_state_event(&new)
+.parse_compressed_state_event(new)
 .ok()
 .map(|(_, id)| id)
 }) {
@@ -95,7 +95,7 @@ impl Service {
 .spaces
 .roomid_spacechunk_cache
 .lock()
-.unwrap()
+.await
 .remove(&pdu.room_id);
 }
 _ => continue,
@@ -412,7 +412,7 @@ impl Service {
 services()
 .rooms
 .state_compressor
-.parse_compressed_state_event(&compressed)
+.parse_compressed_state_event(compressed)
 .ok()
 })
 .filter_map(|(shortstatekey, event_id)| {
@@ -16,11 +16,12 @@ use ruma::{
 },
 StateEventType,
 },
-EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
+EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
 };
-use tracing::error;
+use serde_json::value::to_raw_value;
+use tracing::{error, warn};

-use crate::{services, Error, PduEvent, Result};
+use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result};

 pub struct Service {
 pub db: &'static dyn Data,
@@ -180,7 +181,7 @@ impl Service {
 return Ok(*visibility);
 }

-let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
+let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;

 let history_visibility = self
 .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
@@ -197,11 +198,11 @@ impl Service {
 HistoryVisibility::Shared => currently_member,
 HistoryVisibility::Invited => {
 // Allow if any member on requesting server was AT LEAST invited, else deny
-self.user_was_invited(shortstatehash, &user_id)
+self.user_was_invited(shortstatehash, user_id)
 }
 HistoryVisibility::Joined => {
 // Allow if any member on requested server was joined, else deny
-self.user_was_joined(shortstatehash, &user_id)
+self.user_was_joined(shortstatehash, user_id)
 }
 _ => {
 error!("Unknown history visibility {history_visibility}");
@@ -221,10 +222,10 @@ impl Service {
 /// the room's history_visibility at that event's state.
 #[tracing::instrument(skip(self, user_id, room_id))]
 pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
-let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
+let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;

 let history_visibility = self
-.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
 .map_or(Ok(HistoryVisibility::Shared), |s| {
 serde_json::from_str(s.content.get())
 .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility)
@@ -276,25 +277,66 @@ impl Service {
 services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomName, "")?
+.room_state_get(room_id, &StateEventType::RoomName, "")?
 .map_or(Ok(None), |s| {
 serde_json::from_str(s.content.get())
-.map(|c: RoomNameEventContent| c.name)
-.map_err(|_| Error::bad_database("Invalid room name event in database."))
+.map(|c: RoomNameEventContent| Some(c.name))
+.map_err(|e| {
+    error!(
+        "Invalid room name event in database for room {}. {}",
+        room_id, e
+    );
+    Error::bad_database("Invalid room name event in database.")
+})
 })
 }

-pub fn get_avatar(&self, room_id: &RoomId) -> Result<Option<RoomAvatarEventContent>> {
+pub fn get_avatar(&self, room_id: &RoomId) -> Result<JsOption<RoomAvatarEventContent>> {
 services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
-.map_or(Ok(None), |s| {
+.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
+.map_or(Ok(JsOption::Undefined), |s| {
 serde_json::from_str(s.content.get())
 .map_err(|_| Error::bad_database("Invalid room avatar event in database."))
 })
 }

+pub async fn user_can_invite(
+    &self,
+    room_id: &RoomId,
+    sender: &UserId,
+    target_user: &UserId,
+) -> Result<bool> {
+    let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite))
+        .expect("Event content always serializes");
+
+    let new_event = PduBuilder {
+        event_type: ruma::events::TimelineEventType::RoomMember,
+        content,
+        unsigned: None,
+        state_key: Some(target_user.into()),
+        redacts: None,
+    };
+
+    let mutex_state = Arc::clone(
+        services()
+            .globals
+            .roomid_mutex_state
+            .write()
+            .await
+            .entry(room_id.to_owned())
+            .or_default(),
+    );
+    let state_lock = mutex_state.lock().await;
+
+    Ok(services()
+        .rooms
+        .timeline
+        .create_hash_and_sign_event(new_event, sender, room_id, &state_lock)
+        .is_ok())
+}
+
 pub fn get_member(
 &self,
 room_id: &RoomId,
@@ -303,7 +345,7 @@ impl Service {
 services()
 .rooms
 .state_accessor
-.room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())?
+.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
 .map_or(Ok(None), |s| {
 serde_json::from_str(s.content.get())
 .map_err(|_| Error::bad_database("Invalid room member event in database."))
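`get_avatar` switching from `Option` to `JsOption` is about tri-state JSON: the `js_option` crate (re-exported by ruma) distinguishes a field that is absent from one that is explicitly `null`. Standalone illustration:

    use js_option::JsOption;

    fn describe(avatar_url: JsOption<String>) -> &'static str {
        match avatar_url {
            JsOption::Some(_) => "avatar set",
            JsOption::Null => "avatar explicitly removed",
            JsOption::Undefined => "no avatar state event",
        }
    }

    fn main() {
        assert_eq!(describe(JsOption::Undefined), "no avatar state event");
        assert_eq!(describe(JsOption::Null), "avatar explicitly removed");
        assert_eq!(
            describe(JsOption::Some("mxc://example.org/abc".to_owned())),
            "avatar set"
        );
    }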
@@ -1,6 +1,6 @@
 use std::{collections::HashSet, sync::Arc};

-use crate::Result;
+use crate::{service::appservice::RegistrationInfo, Result};
 use ruma::{
 events::{AnyStrippedStateEvent, AnySyncStateEvent},
 serde::Raw,
@@ -22,11 +22,7 @@ pub trait Data: Send + Sync {

 fn get_our_real_users(&self, room_id: &RoomId) -> Result<Arc<HashSet<OwnedUserId>>>;

-fn appservice_in_room(
-    &self,
-    room_id: &RoomId,
-    appservice: &(String, serde_yaml::Value),
-) -> Result<bool>;
+fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result<bool>;

 /// Makes a user forget a room.
 fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>;
@@ -78,6 +74,7 @@ pub trait Data: Send + Sync {
 ) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>;

 /// Returns an iterator over all rooms a user was invited to.
+#[allow(clippy::type_complexity)]
 fn rooms_invited<'a>(
 &'a self,
 user_id: &UserId,
@@ -96,6 +93,7 @@ pub trait Data: Send + Sync {
 ) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>>;

 /// Returns an iterator over all rooms a user left.
+#[allow(clippy::type_complexity)]
 fn rooms_left<'a>(
 &'a self,
 user_id: &UserId,
@@ -16,7 +16,7 @@ use ruma::{
 };
 use tracing::warn;

-use crate::{services, Error, Result};
+use crate::{service::appservice::RegistrationInfo, services, Error, Result};

 pub struct Service {
 pub db: &'static dyn Data,
@@ -205,7 +205,7 @@ impl Service {
 pub fn appservice_in_room(
 &self,
 room_id: &RoomId,
-appservice: &(String, serde_yaml::Value),
+appservice: &RegistrationInfo,
 ) -> Result<bool> {
 self.db.appservice_in_room(room_id, appservice)
 }
Some files were not shown because too many files have changed in this diff.