Merge branch 'next' into 'room-v11'
# Conflicts: # src/service/rooms/timeline/mod.rs # src/utils/error.rs
This commit is contained in:
commit
b5e21f761b
109 changed files with 4239 additions and 3708 deletions
15
.editorconfig
Normal file
15
.editorconfig
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
# EditorConfig is awesome: https://EditorConfig.org
|
||||||
|
|
||||||
|
root = true
|
||||||
|
|
||||||
|
[*]
|
||||||
|
charset = utf-8
|
||||||
|
end_of_line = lf
|
||||||
|
tab_width = 4
|
||||||
|
indent_size = 4
|
||||||
|
indent_style = space
|
||||||
|
insert_final_newline = true
|
||||||
|
max_line_length = 120
|
||||||
|
|
||||||
|
[*.nix]
|
||||||
|
indent_size = 2
|
4
.envrc
4
.envrc
|
@ -1 +1,5 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
use flake
|
use flake
|
||||||
|
|
||||||
|
PATH_add bin
|
||||||
|
|
6
.gitignore
vendored
6
.gitignore
vendored
|
@ -68,3 +68,9 @@ cached_target
|
||||||
|
|
||||||
# Direnv cache
|
# Direnv cache
|
||||||
/.direnv
|
/.direnv
|
||||||
|
|
||||||
|
# Gitlab CI cache
|
||||||
|
/.gitlab-ci.d
|
||||||
|
|
||||||
|
# mdbook output
|
||||||
|
public/
|
410
.gitlab-ci.yml
410
.gitlab-ci.yml
|
@ -1,244 +1,184 @@
|
||||||
stages:
|
stages:
|
||||||
- build
|
- ci
|
||||||
- build docker image
|
- artifacts
|
||||||
- test
|
- publish
|
||||||
- upload artifacts
|
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
# Make GitLab CI go fast:
|
# Makes some things print in color
|
||||||
GIT_SUBMODULE_STRATEGY: recursive
|
TERM: ansi
|
||||||
FF_USE_FASTZIP: 1
|
|
||||||
CACHE_COMPRESSION_LEVEL: fastest
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
# Create and publish docker image #
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
|
|
||||||
.docker-shared-settings:
|
|
||||||
stage: "build docker image"
|
|
||||||
needs: []
|
|
||||||
tags: [ "docker" ]
|
|
||||||
variables:
|
|
||||||
# Docker in Docker:
|
|
||||||
DOCKER_BUILDKIT: 1
|
|
||||||
image:
|
|
||||||
name: docker.io/docker
|
|
||||||
services:
|
|
||||||
- name: docker.io/docker:dind
|
|
||||||
alias: docker
|
|
||||||
script:
|
|
||||||
- apk add openssh-client
|
|
||||||
- eval $(ssh-agent -s)
|
|
||||||
- mkdir -p ~/.ssh && chmod 700 ~/.ssh
|
|
||||||
- printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config
|
|
||||||
- sh .gitlab/setup-buildx-remote-builders.sh
|
|
||||||
# Authorize against this project's own image registry:
|
|
||||||
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
|
|
||||||
# Build multiplatform image and push to temporary tag:
|
|
||||||
- >
|
|
||||||
docker buildx build
|
|
||||||
--platform "linux/arm/v7,linux/arm64,linux/amd64"
|
|
||||||
--pull
|
|
||||||
--tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
|
|
||||||
--push
|
|
||||||
--provenance=false
|
|
||||||
--file "Dockerfile" .
|
|
||||||
# Build multiplatform image to deb stage and extract their .deb files:
|
|
||||||
- >
|
|
||||||
docker buildx build
|
|
||||||
--platform "linux/arm/v7,linux/arm64,linux/amd64"
|
|
||||||
--target "packager-result"
|
|
||||||
--output="type=local,dest=/tmp/build-output"
|
|
||||||
--provenance=false
|
|
||||||
--file "Dockerfile" .
|
|
||||||
# Build multiplatform image to binary stage and extract their binaries:
|
|
||||||
- >
|
|
||||||
docker buildx build
|
|
||||||
--platform "linux/arm/v7,linux/arm64,linux/amd64"
|
|
||||||
--target "builder-result"
|
|
||||||
--output="type=local,dest=/tmp/build-output"
|
|
||||||
--provenance=false
|
|
||||||
--file "Dockerfile" .
|
|
||||||
# Copy to GitLab container registry:
|
|
||||||
- >
|
|
||||||
docker buildx imagetools create
|
|
||||||
--tag "$CI_REGISTRY_IMAGE/$TAG"
|
|
||||||
--tag "$CI_REGISTRY_IMAGE/$TAG-bullseye"
|
|
||||||
--tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
|
|
||||||
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
|
|
||||||
# if DockerHub credentials exist, also copy to dockerhub:
|
|
||||||
- if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
|
|
||||||
- >
|
|
||||||
if [ -n "${DOCKER_HUB}" ]; then
|
|
||||||
docker buildx imagetools create
|
|
||||||
--tag "$DOCKER_HUB_IMAGE/$TAG"
|
|
||||||
--tag "$DOCKER_HUB_IMAGE/$TAG-bullseye"
|
|
||||||
--tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
|
|
||||||
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
|
|
||||||
; fi
|
|
||||||
- mv /tmp/build-output ./
|
|
||||||
artifacts:
|
|
||||||
paths:
|
|
||||||
- "./build-output/"
|
|
||||||
|
|
||||||
docker:next:
|
|
||||||
extends: .docker-shared-settings
|
|
||||||
rules:
|
|
||||||
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"'
|
|
||||||
variables:
|
|
||||||
TAG: "matrix-conduit:next"
|
|
||||||
|
|
||||||
docker:master:
|
|
||||||
extends: .docker-shared-settings
|
|
||||||
rules:
|
|
||||||
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"'
|
|
||||||
variables:
|
|
||||||
TAG: "matrix-conduit:latest"
|
|
||||||
|
|
||||||
docker:tags:
|
|
||||||
extends: .docker-shared-settings
|
|
||||||
rules:
|
|
||||||
- if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG"
|
|
||||||
variables:
|
|
||||||
TAG: "matrix-conduit:$CI_COMMIT_TAG"
|
|
||||||
|
|
||||||
|
|
||||||
docker build debugging:
|
|
||||||
extends: .docker-shared-settings
|
|
||||||
rules:
|
|
||||||
- if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/"
|
|
||||||
variables:
|
|
||||||
TAG: "matrix-conduit-docker-tests:latest"
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
# Run tests #
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
|
|
||||||
cargo check:
|
|
||||||
stage: test
|
|
||||||
image: docker.io/rust:1.70.0-bullseye
|
|
||||||
needs: []
|
|
||||||
interruptible: true
|
|
||||||
before_script:
|
|
||||||
- "rustup show && rustc --version && cargo --version" # Print version info for debugging
|
|
||||||
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
|
|
||||||
script:
|
|
||||||
- cargo check
|
|
||||||
|
|
||||||
|
|
||||||
.test-shared-settings:
|
|
||||||
stage: "test"
|
|
||||||
needs: []
|
|
||||||
image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
|
|
||||||
tags: ["docker"]
|
|
||||||
variables:
|
|
||||||
CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
|
|
||||||
interruptible: true
|
|
||||||
|
|
||||||
test:cargo:
|
|
||||||
extends: .test-shared-settings
|
|
||||||
before_script:
|
|
||||||
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
|
|
||||||
script:
|
|
||||||
- rustc --version && cargo --version # Print version info for debugging
|
|
||||||
- "cargo test --color always --workspace --verbose --locked --no-fail-fast"
|
|
||||||
|
|
||||||
test:clippy:
|
|
||||||
extends: .test-shared-settings
|
|
||||||
allow_failure: true
|
|
||||||
before_script:
|
|
||||||
- rustup component add clippy
|
|
||||||
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
|
|
||||||
script:
|
|
||||||
- rustc --version && cargo --version # Print version info for debugging
|
|
||||||
- "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
reports:
|
|
||||||
codequality: gl-code-quality-report.json
|
|
||||||
|
|
||||||
test:format:
|
|
||||||
extends: .test-shared-settings
|
|
||||||
before_script:
|
|
||||||
- rustup component add rustfmt
|
|
||||||
script:
|
|
||||||
- cargo fmt --all -- --check
|
|
||||||
|
|
||||||
test:audit:
|
|
||||||
extends: .test-shared-settings
|
|
||||||
allow_failure: true
|
|
||||||
script:
|
|
||||||
- cargo audit --color always || true
|
|
||||||
- cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
reports:
|
|
||||||
sast: gl-sast-report.json
|
|
||||||
|
|
||||||
test:dockerlint:
|
|
||||||
stage: "test"
|
|
||||||
needs: []
|
|
||||||
image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine
|
|
||||||
interruptible: true
|
|
||||||
script:
|
|
||||||
- hadolint --version
|
|
||||||
# First pass: Print for CI log:
|
|
||||||
- >
|
|
||||||
hadolint
|
|
||||||
--no-fail --verbose
|
|
||||||
./Dockerfile
|
|
||||||
# Then output the results into a json for GitLab to pretty-print this in the MR:
|
|
||||||
- >
|
|
||||||
hadolint
|
|
||||||
--format gitlab_codeclimate
|
|
||||||
--failure-threshold error
|
|
||||||
./Dockerfile > dockerlint.json
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
reports:
|
|
||||||
codequality: dockerlint.json
|
|
||||||
paths:
|
|
||||||
- dockerlint.json
|
|
||||||
rules:
|
|
||||||
- if: '$CI_COMMIT_REF_NAME != "master"'
|
|
||||||
changes:
|
|
||||||
- docker/*Dockerfile
|
|
||||||
- Dockerfile
|
|
||||||
- .gitlab-ci.yml
|
|
||||||
- if: '$CI_COMMIT_REF_NAME == "master"'
|
|
||||||
- if: '$CI_COMMIT_REF_NAME == "next"'
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
# Store binaries as package so they have download urls #
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
|
|
||||||
# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME:
|
|
||||||
|
|
||||||
#publish:package:
|
|
||||||
# stage: "upload artifacts"
|
|
||||||
# needs:
|
|
||||||
# - "docker:tags"
|
|
||||||
# rules:
|
|
||||||
# - if: "$CI_COMMIT_TAG"
|
|
||||||
# image: curlimages/curl:latest
|
|
||||||
# tags: ["docker"]
|
|
||||||
# variables:
|
|
||||||
# GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
|
|
||||||
# script:
|
|
||||||
# - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
|
|
||||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
|
|
||||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"'
|
|
||||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
|
|
||||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
|
|
||||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"'
|
|
||||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"'
|
|
||||||
|
|
||||||
# Avoid duplicate pipelines
|
# Avoid duplicate pipelines
|
||||||
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
|
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
|
||||||
workflow:
|
workflow:
|
||||||
rules:
|
rules:
|
||||||
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
|
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||||
- if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
|
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
|
||||||
when: never
|
when: never
|
||||||
- if: "$CI_COMMIT_BRANCH"
|
- if: $CI
|
||||||
- if: "$CI_COMMIT_TAG"
|
|
||||||
|
before_script:
|
||||||
|
# Enable nix-command and flakes
|
||||||
|
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Add our own binary cache
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Add alternate binary cache
|
||||||
|
- if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Add crane binary cache
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Add nix-community binary cache
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Install direnv and nix-direnv
|
||||||
|
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
|
||||||
|
|
||||||
|
# Allow .envrc
|
||||||
|
- if command -v nix > /dev/null; then direnv allow; fi
|
||||||
|
|
||||||
|
# Set CARGO_HOME to a cacheable path
|
||||||
|
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
|
||||||
|
|
||||||
|
ci:
|
||||||
|
stage: ci
|
||||||
|
image: nixos/nix:2.20.4
|
||||||
|
script:
|
||||||
|
# Cache the inputs required for the devShell
|
||||||
|
- ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation
|
||||||
|
|
||||||
|
- direnv exec . engage
|
||||||
|
cache:
|
||||||
|
key: nix
|
||||||
|
paths:
|
||||||
|
- target
|
||||||
|
- .gitlab-ci.d
|
||||||
|
rules:
|
||||||
|
# CI on upstream runners (only available for maintainers)
|
||||||
|
- if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true"
|
||||||
|
# Manual CI on unprotected branches that are not MRs
|
||||||
|
- if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false"
|
||||||
|
when: manual
|
||||||
|
# Manual CI on forks
|
||||||
|
- if: $IS_UPSTREAM_CI != "true"
|
||||||
|
when: manual
|
||||||
|
- if: $CI
|
||||||
|
interruptible: true
|
||||||
|
|
||||||
|
artifacts:
|
||||||
|
stage: artifacts
|
||||||
|
image: nixos/nix:2.20.4
|
||||||
|
script:
|
||||||
|
- ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
|
||||||
|
- cp result/bin/conduit x86_64-unknown-linux-musl
|
||||||
|
|
||||||
|
- mkdir -p target/release
|
||||||
|
- cp result/bin/conduit target/release
|
||||||
|
- direnv exec . cargo deb --no-build
|
||||||
|
- mv target/debian/*.deb x86_64-unknown-linux-musl.deb
|
||||||
|
|
||||||
|
# Since the OCI image package is based on the binary package, this has the
|
||||||
|
# fun side effect of uploading the normal binary too. Conduit users who are
|
||||||
|
# deploying with Nix can leverage this fact by adding our binary cache to
|
||||||
|
# their systems.
|
||||||
|
#
|
||||||
|
# Note that although we have an `oci-image-x86_64-unknown-linux-musl`
|
||||||
|
# output, we don't build it because it would be largely redundant to this
|
||||||
|
# one since it's all containerized anyway.
|
||||||
|
- ./bin/nix-build-and-cache .#oci-image
|
||||||
|
- cp result oci-image-amd64.tar.gz
|
||||||
|
|
||||||
|
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
|
||||||
|
- cp result/bin/conduit aarch64-unknown-linux-musl
|
||||||
|
|
||||||
|
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
|
||||||
|
- cp result oci-image-arm64v8.tar.gz
|
||||||
|
|
||||||
|
- ./bin/nix-build-and-cache .#book
|
||||||
|
# We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746
|
||||||
|
- cp -r --dereference result public
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- x86_64-unknown-linux-musl
|
||||||
|
- aarch64-unknown-linux-musl
|
||||||
|
- x86_64-unknown-linux-musl.deb
|
||||||
|
- oci-image-amd64.tar.gz
|
||||||
|
- oci-image-arm64v8.tar.gz
|
||||||
|
- public
|
||||||
|
rules:
|
||||||
|
# CI required for all MRs
|
||||||
|
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||||
|
# Optional CI on forks
|
||||||
|
- if: $IS_UPSTREAM_CI != "true"
|
||||||
|
when: manual
|
||||||
|
allow_failure: true
|
||||||
|
- if: $CI
|
||||||
|
interruptible: true
|
||||||
|
|
||||||
|
.push-oci-image:
|
||||||
|
stage: publish
|
||||||
|
image: docker:25.0.0
|
||||||
|
services:
|
||||||
|
- docker:25.0.0-dind
|
||||||
|
variables:
|
||||||
|
IMAGE_SUFFIX_AMD64: amd64
|
||||||
|
IMAGE_SUFFIX_ARM64V8: arm64v8
|
||||||
|
script:
|
||||||
|
- docker load -i oci-image-amd64.tar.gz
|
||||||
|
- IMAGE_ID_AMD64=$(docker images -q conduit:next)
|
||||||
|
- docker load -i oci-image-arm64v8.tar.gz
|
||||||
|
- IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
|
||||||
|
# Tag and push the architecture specific images
|
||||||
|
- docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
||||||
|
- docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
||||||
|
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
# Tag the multi-arch image
|
||||||
|
- docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
- docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
|
||||||
|
# Tag and push the git ref
|
||||||
|
- docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
- docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
|
||||||
|
# Tag git tags as 'latest'
|
||||||
|
- |
|
||||||
|
if [[ -n "$CI_COMMIT_TAG" ]]; then
|
||||||
|
docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
docker manifest push $IMAGE_NAME:latest
|
||||||
|
fi
|
||||||
|
dependencies:
|
||||||
|
- artifacts
|
||||||
|
only:
|
||||||
|
- next
|
||||||
|
- master
|
||||||
|
- tags
|
||||||
|
|
||||||
|
oci-image:push-gitlab:
|
||||||
|
extends: .push-oci-image
|
||||||
|
variables:
|
||||||
|
IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
|
||||||
|
before_script:
|
||||||
|
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
|
||||||
|
|
||||||
|
oci-image:push-dockerhub:
|
||||||
|
extends: .push-oci-image
|
||||||
|
variables:
|
||||||
|
IMAGE_NAME: matrixconduit/matrix-conduit
|
||||||
|
before_script:
|
||||||
|
- docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD
|
||||||
|
|
||||||
|
pages:
|
||||||
|
stage: publish
|
||||||
|
dependencies:
|
||||||
|
- artifacts
|
||||||
|
only:
|
||||||
|
- next
|
||||||
|
script:
|
||||||
|
- "true"
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- public
|
||||||
|
|
3
.gitlab/route-map.yml
Normal file
3
.gitlab/route-map.yml
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
# Docs: Map markdown to html files
|
||||||
|
- source: /docs/(.+)\.md/
|
||||||
|
public: '\1.html'
|
1580
Cargo.lock
generated
1580
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
52
Cargo.toml
52
Cargo.toml
|
@ -1,3 +1,14 @@
|
||||||
|
# Keep alphabetically sorted
|
||||||
|
[workspace.lints.rust]
|
||||||
|
explicit_outlives_requirements = "warn"
|
||||||
|
unused_qualifications = "warn"
|
||||||
|
|
||||||
|
# Keep alphabetically sorted
|
||||||
|
[workspace.lints.clippy]
|
||||||
|
cloned_instead_of_copied = "warn"
|
||||||
|
dbg_macro = "warn"
|
||||||
|
str_to_string = "warn"
|
||||||
|
|
||||||
[package]
|
[package]
|
||||||
name = "conduit"
|
name = "conduit"
|
||||||
description = "A Matrix homeserver written in Rust"
|
description = "A Matrix homeserver written in Rust"
|
||||||
|
@ -9,14 +20,14 @@ readme = "README.md"
|
||||||
version = "0.7.0-alpha"
|
version = "0.7.0-alpha"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
|
||||||
# When changing this, make sure to update the `flake.lock` file by running
|
# See also `rust-toolchain.toml`
|
||||||
# `nix flake update`. If you don't have Nix installed or otherwise don't know
|
rust-version = "1.75.0"
|
||||||
# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in
|
|
||||||
# the matrix room.
|
|
||||||
rust-version = "1.70.0"
|
|
||||||
|
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
# Web framework
|
# Web framework
|
||||||
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
|
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
|
||||||
|
@ -26,7 +37,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv
|
||||||
|
|
||||||
# Used for matrix spec type definitions and helpers
|
# Used for matrix spec type definitions and helpers
|
||||||
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||||
ruma = { git = "https://github.com/ruma/ruma", rev = "b4853aa8fa5e3a24e3689fc88044de9915f6ab67", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
||||||
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
||||||
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
||||||
|
|
||||||
|
@ -53,7 +64,8 @@ rand = "0.8.5"
|
||||||
# Used to hash passwords
|
# Used to hash passwords
|
||||||
rust-argon2 = "1.0.0"
|
rust-argon2 = "1.0.0"
|
||||||
# Used to send requests
|
# Used to send requests
|
||||||
reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
|
hyper = "0.14.26"
|
||||||
|
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
|
||||||
# Used for conduit::Error type
|
# Used for conduit::Error type
|
||||||
thiserror = "1.0.40"
|
thiserror = "1.0.40"
|
||||||
# Used to generate thumbnails for images
|
# Used to generate thumbnails for images
|
||||||
|
@ -61,13 +73,13 @@ image = { version = "0.24.6", default-features = false, features = ["jpeg", "png
|
||||||
# Used to encode server public key
|
# Used to encode server public key
|
||||||
base64 = "0.21.2"
|
base64 = "0.21.2"
|
||||||
# Used when hashing the state
|
# Used when hashing the state
|
||||||
ring = "0.16.20"
|
ring = "0.17.7"
|
||||||
# Used when querying the SRV record of other servers
|
# Used when querying the SRV record of other servers
|
||||||
trust-dns-resolver = "0.22.0"
|
trust-dns-resolver = "0.22.0"
|
||||||
# Used to find matching events for appservices
|
# Used to find matching events for appservices
|
||||||
regex = "1.8.1"
|
regex = "1.8.1"
|
||||||
# jwt jsonwebtokens
|
# jwt jsonwebtokens
|
||||||
jsonwebtoken = "8.3.0"
|
jsonwebtoken = "9.2.0"
|
||||||
# Performance measurements
|
# Performance measurements
|
||||||
tracing = { version = "0.1.37", features = [] }
|
tracing = { version = "0.1.37", features = [] }
|
||||||
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
|
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
|
||||||
|
@ -78,21 +90,19 @@ tracing-opentelemetry = "0.18.0"
|
||||||
lru-cache = "0.1.2"
|
lru-cache = "0.1.2"
|
||||||
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
|
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
|
||||||
parking_lot = { version = "0.12.1", optional = true }
|
parking_lot = { version = "0.12.1", optional = true }
|
||||||
crossbeam = { version = "0.8.2", optional = true }
|
# crossbeam = { version = "0.8.2", optional = true }
|
||||||
num_cpus = "1.15.0"
|
num_cpus = "1.15.0"
|
||||||
threadpool = "1.8.1"
|
threadpool = "1.8.1"
|
||||||
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
||||||
# Used for ruma wrapper
|
# Used for ruma wrapper
|
||||||
serde_html_form = "0.2.0"
|
serde_html_form = "0.2.0"
|
||||||
|
|
||||||
rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }
|
|
||||||
|
|
||||||
thread_local = "1.1.7"
|
thread_local = "1.1.7"
|
||||||
# used for TURN server authentication
|
# used for TURN server authentication
|
||||||
hmac = "0.12.1"
|
hmac = "0.12.1"
|
||||||
sha-1 = "0.10.1"
|
sha-1 = "0.10.1"
|
||||||
# used for conduit's CLI and admin room command parsing
|
# used for conduit's CLI and admin room command parsing
|
||||||
clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
|
clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context", "string"] }
|
||||||
futures-util = { version = "0.3.28", default-features = false }
|
futures-util = { version = "0.3.28", default-features = false }
|
||||||
# Used for reading the configuration from conduit.toml & environment variables
|
# Used for reading the configuration from conduit.toml & environment variables
|
||||||
figment = { version = "0.10.8", features = ["env", "toml"] }
|
figment = { version = "0.10.8", features = ["env", "toml"] }
|
||||||
|
@ -104,15 +114,25 @@ async-trait = "0.1.68"
|
||||||
|
|
||||||
sd-notify = { version = "0.4.1", optional = true }
|
sd-notify = { version = "0.4.1", optional = true }
|
||||||
|
|
||||||
|
[dependencies.rocksdb]
|
||||||
|
package = "rust-rocksdb"
|
||||||
|
version = "0.22.7"
|
||||||
|
optional = true
|
||||||
|
features = [
|
||||||
|
"multi-threaded-cf",
|
||||||
|
"zstd",
|
||||||
|
"lz4",
|
||||||
|
]
|
||||||
|
|
||||||
[target.'cfg(unix)'.dependencies]
|
[target.'cfg(unix)'.dependencies]
|
||||||
nix = { version = "0.26.2", features = ["resource"] }
|
nix = { version = "0.28", features = ["resource"] }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
|
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
|
||||||
#backend_sled = ["sled"]
|
#backend_sled = ["sled"]
|
||||||
backend_persy = ["persy", "parking_lot"]
|
backend_persy = ["persy", "parking_lot"]
|
||||||
backend_sqlite = ["sqlite"]
|
backend_sqlite = ["sqlite"]
|
||||||
backend_heed = ["heed", "crossbeam"]
|
#backend_heed = ["heed", "crossbeam"]
|
||||||
backend_rocksdb = ["rocksdb"]
|
backend_rocksdb = ["rocksdb"]
|
||||||
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
|
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
|
||||||
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
|
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
|
||||||
|
|
132
Dockerfile
132
Dockerfile
|
@ -1,132 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
FROM docker.io/rust:1.70-bullseye AS base
|
|
||||||
|
|
||||||
FROM base AS builder
|
|
||||||
WORKDIR /usr/src/conduit
|
|
||||||
|
|
||||||
# Install required packages to build Conduit and it's dependencies
|
|
||||||
RUN apt-get update && \
|
|
||||||
apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5
|
|
||||||
|
|
||||||
# == Build dependencies without our own code separately for caching ==
|
|
||||||
#
|
|
||||||
# Need a fake main.rs since Cargo refuses to build anything otherwise.
|
|
||||||
#
|
|
||||||
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
|
|
||||||
# request that would allow just dependencies to be compiled, presumably
|
|
||||||
# regardless of whether source files are available.
|
|
||||||
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
|
|
||||||
COPY Cargo.toml Cargo.lock ./
|
|
||||||
RUN cargo build --release && rm -r src
|
|
||||||
|
|
||||||
# Copy over actual Conduit sources
|
|
||||||
COPY src src
|
|
||||||
|
|
||||||
# main.rs and lib.rs need their timestamp updated for this to work correctly since
|
|
||||||
# otherwise the build with the fake main.rs from above is newer than the
|
|
||||||
# source files (COPY preserves timestamps).
|
|
||||||
#
|
|
||||||
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
|
|
||||||
RUN touch src/main.rs && touch src/lib.rs && cargo build --release
|
|
||||||
|
|
||||||
|
|
||||||
# ONLY USEFUL FOR CI: target stage to extract build artifacts
|
|
||||||
FROM scratch AS builder-result
|
|
||||||
COPY --from=builder /usr/src/conduit/target/release/conduit /conduit
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------------------------------------------
|
|
||||||
# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems:
|
|
||||||
# ---------------------------------------------------------------------------------------------------------------
|
|
||||||
FROM base AS build-cargo-deb
|
|
||||||
|
|
||||||
RUN apt-get update && \
|
|
||||||
apt-get install -y --no-install-recommends \
|
|
||||||
dpkg \
|
|
||||||
dpkg-dev \
|
|
||||||
liblzma-dev
|
|
||||||
|
|
||||||
RUN cargo install cargo-deb
|
|
||||||
# => binary is in /usr/local/cargo/bin/cargo-deb
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------------------------------------------
|
|
||||||
# Package conduit build-result into a .deb package:
|
|
||||||
# ---------------------------------------------------------------------------------------------------------------
|
|
||||||
FROM builder AS packager
|
|
||||||
WORKDIR /usr/src/conduit
|
|
||||||
|
|
||||||
COPY ./LICENSE ./LICENSE
|
|
||||||
COPY ./README.md ./README.md
|
|
||||||
COPY debian ./debian
|
|
||||||
COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb
|
|
||||||
|
|
||||||
# --no-build makes cargo-deb reuse already compiled project
|
|
||||||
RUN cargo deb --no-build
|
|
||||||
# => Package is in /usr/src/conduit/target/debian/<project_name>_<version>_<arch>.deb
|
|
||||||
|
|
||||||
|
|
||||||
# ONLY USEFUL FOR CI: target stage to extract build artifacts
|
|
||||||
FROM scratch AS packager-result
|
|
||||||
COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------------------------------------------
|
|
||||||
# Stuff below this line actually ends up in the resulting docker image
|
|
||||||
# ---------------------------------------------------------------------------------------------------------------
|
|
||||||
FROM docker.io/debian:bullseye-slim AS runner
|
|
||||||
|
|
||||||
# Standard port on which Conduit launches.
|
|
||||||
# You still need to map the port when using the docker command or docker-compose.
|
|
||||||
EXPOSE 6167
|
|
||||||
|
|
||||||
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
|
|
||||||
|
|
||||||
ENV CONDUIT_PORT=6167 \
|
|
||||||
CONDUIT_ADDRESS="0.0.0.0" \
|
|
||||||
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
|
|
||||||
CONDUIT_CONFIG=''
|
|
||||||
# └─> Set no config file to do all configuration with env vars
|
|
||||||
|
|
||||||
# Conduit needs:
|
|
||||||
# dpkg: to install conduit.deb
|
|
||||||
# ca-certificates: for https
|
|
||||||
# iproute2 & wget: for the healthcheck script
|
|
||||||
RUN apt-get update && apt-get -y --no-install-recommends install \
|
|
||||||
dpkg \
|
|
||||||
ca-certificates \
|
|
||||||
iproute2 \
|
|
||||||
wget \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
# Test if Conduit is still alive, uses the same endpoint as Element
|
|
||||||
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
|
|
||||||
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
|
|
||||||
|
|
||||||
# Install conduit.deb:
|
|
||||||
COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/
|
|
||||||
RUN dpkg -i /srv/conduit/*.deb
|
|
||||||
|
|
||||||
# Improve security: Don't run stuff as root, that does not need to run as root
|
|
||||||
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
|
|
||||||
ARG USER_ID=1000
|
|
||||||
ARG GROUP_ID=1000
|
|
||||||
RUN set -x ; \
|
|
||||||
groupadd -r -g ${GROUP_ID} conduit ; \
|
|
||||||
useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1
|
|
||||||
|
|
||||||
# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable:
|
|
||||||
RUN chown -cR conduit:conduit /srv/conduit && \
|
|
||||||
chmod +x /srv/conduit/healthcheck.sh && \
|
|
||||||
mkdir -p ${DEFAULT_DB_PATH} && \
|
|
||||||
chown -cR conduit:conduit ${DEFAULT_DB_PATH}
|
|
||||||
|
|
||||||
# Change user to conduit, no root permissions afterwards:
|
|
||||||
USER conduit
|
|
||||||
# Set container home directory
|
|
||||||
WORKDIR /srv/conduit
|
|
||||||
|
|
||||||
# Run Conduit and print backtraces on panics
|
|
||||||
ENV RUST_BACKTRACE=1
|
|
||||||
ENTRYPOINT [ "/usr/sbin/matrix-conduit" ]
|
|
60
README.md
60
README.md
|
@ -1,7 +1,15 @@
|
||||||
# Conduit
|
# Conduit
|
||||||
### A Matrix homeserver written in Rust
|
|
||||||
|
|
||||||
|
<!-- ANCHOR: catchphrase -->
|
||||||
|
### A Matrix homeserver written in Rust
|
||||||
|
<!-- ANCHOR_END: catchphrase -->
|
||||||
|
|
||||||
|
Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information.
|
||||||
|
Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository.
|
||||||
|
|
||||||
|
<!-- ANCHOR: body -->
|
||||||
#### What is Matrix?
|
#### What is Matrix?
|
||||||
|
|
||||||
[Matrix](https://matrix.org) is an open network for secure and decentralized
|
[Matrix](https://matrix.org) is an open network for secure and decentralized
|
||||||
communication. Users from every Matrix homeserver can chat with users from all
|
communication. Users from every Matrix homeserver can chat with users from all
|
||||||
other Matrix servers. You can even use bridges (also called Matrix appservices)
|
other Matrix servers. You can even use bridges (also called Matrix appservices)
|
||||||
|
@ -15,8 +23,7 @@ friends or company.
|
||||||
|
|
||||||
#### Can I try it out?
|
#### Can I try it out?
|
||||||
|
|
||||||
Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
|
Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login.
|
||||||
example) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information.
|
|
||||||
|
|
||||||
Server hosting for conduit.rs is donated by the Matrix.org Foundation.
|
Server hosting for conduit.rs is donated by the Matrix.org Foundation.
|
||||||
|
|
||||||
|
@ -30,27 +37,25 @@ There are still a few important features missing:
|
||||||
|
|
||||||
- E2EE emoji comparison over federation (E2EE chat works)
|
- E2EE emoji comparison over federation (E2EE chat works)
|
||||||
- Outgoing read receipts, typing, presence over federation (incoming works)
|
- Outgoing read receipts, typing, presence over federation (incoming works)
|
||||||
|
<!-- ANCHOR_END: body -->
|
||||||
|
|
||||||
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
|
<!-- ANCHOR: footer -->
|
||||||
|
|
||||||
#### How can I deploy my own?
|
|
||||||
|
|
||||||
- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
|
|
||||||
- Debian package: [debian/README.md](debian/README.md)
|
|
||||||
- Nix/NixOS: [nix/README.md](nix/README.md)
|
|
||||||
- Docker: [docker/README.md](docker/README.md)
|
|
||||||
|
|
||||||
If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).
|
|
||||||
|
|
||||||
#### How can I contribute?
|
#### How can I contribute?
|
||||||
|
|
||||||
1. Look for an issue you would like to work on and make sure it's not assigned
|
1. Look for an issue you would like to work on and make sure no one else is currently working on it.
|
||||||
to other users
|
2. Tell us that you are working on the issue (comment on the issue or chat in
|
||||||
2. Ask someone to assign the issue to you (comment on the issue or chat in
|
[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions.
|
||||||
[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org))
|
3. Fork the repo, create a new branch and push commits.
|
||||||
3. Fork the repo and work on the issue.[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) is happy to help :)
|
|
||||||
4. Submit a MR
|
4. Submit a MR
|
||||||
|
|
||||||
|
#### Contact
|
||||||
|
|
||||||
|
If you have any questions, feel free to
|
||||||
|
- Ask in `#conduit:fachschaften.org` on Matrix
|
||||||
|
- Write an E-Mail to `conduit@koesters.xyz`
|
||||||
|
- Send an direct message to `@timokoesters:fachschaften.org` on Matrix
|
||||||
|
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
|
||||||
|
|
||||||
#### Thanks to
|
#### Thanks to
|
||||||
|
|
||||||
Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
|
Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
|
||||||
|
@ -60,20 +65,13 @@ Thanks to the contributors to Conduit and all libraries we use, for example:
|
||||||
- Ruma: A clean library for the Matrix Spec in Rust
|
- Ruma: A clean library for the Matrix Spec in Rust
|
||||||
- axum: A modular web framework
|
- axum: A modular web framework
|
||||||
|
|
||||||
#### Contact
|
|
||||||
|
|
||||||
If you run into any question, feel free to
|
|
||||||
- Ask us in `#conduit:fachschaften.org` on Matrix
|
|
||||||
- Write an E-Mail to `conduit@koesters.xyz`
|
|
||||||
- Send an direct message to `timokoesters@fachschaften.org` on Matrix
|
|
||||||
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
|
|
||||||
|
|
||||||
#### Donate
|
#### Donate
|
||||||
|
|
||||||
Liberapay: <https://liberapay.com/timokoesters/>\
|
- Liberapay: <https://liberapay.com/timokoesters/>
|
||||||
Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
|
- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
|
||||||
|
|
||||||
#### Logo
|
#### Logo
|
||||||
|
|
||||||
Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \
|
- Lightning Bolt Logo: <https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg>
|
||||||
Logo License: https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md
|
- Logo License: <https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md>
|
||||||
|
<!-- ANCHOR_END: footer -->
|
||||||
|
|
37
bin/complement
Executable file
37
bin/complement
Executable file
|
@ -0,0 +1,37 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Path to Complement's source code
|
||||||
|
COMPLEMENT_SRC="$1"
|
||||||
|
|
||||||
|
# A `.jsonl` file to write test logs to
|
||||||
|
LOG_FILE="$2"
|
||||||
|
|
||||||
|
# A `.jsonl` file to write test results to
|
||||||
|
RESULTS_FILE="$3"
|
||||||
|
|
||||||
|
OCI_IMAGE="complement-conduit:dev"
|
||||||
|
|
||||||
|
env \
|
||||||
|
-C "$(git rev-parse --show-toplevel)" \
|
||||||
|
docker build \
|
||||||
|
--tag "$OCI_IMAGE" \
|
||||||
|
--file complement/Dockerfile \
|
||||||
|
.
|
||||||
|
|
||||||
|
# It's okay (likely, even) that `go test` exits nonzero
|
||||||
|
set +o pipefail
|
||||||
|
env \
|
||||||
|
-C "$COMPLEMENT_SRC" \
|
||||||
|
COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
|
||||||
|
go test -json ./tests | tee "$LOG_FILE"
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
# Post-process the results into an easy-to-compare format
|
||||||
|
cat "$LOG_FILE" | jq -c '
|
||||||
|
select(
|
||||||
|
(.Action == "pass" or .Action == "fail" or .Action == "skip")
|
||||||
|
and .Test != null
|
||||||
|
) | {Action: .Action, Test: .Test}
|
||||||
|
' | sort > "$RESULTS_FILE"
|
26
bin/nix-build-and-cache
Executable file
26
bin/nix-build-and-cache
Executable file
|
@ -0,0 +1,26 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# The first argument must be the desired installable
|
||||||
|
INSTALLABLE="$1"
|
||||||
|
|
||||||
|
# Build the installable and forward any other arguments too
|
||||||
|
nix build "$@"
|
||||||
|
|
||||||
|
if [ ! -z ${ATTIC_TOKEN+x} ]; then
|
||||||
|
nix run --inputs-from . attic -- \
|
||||||
|
login \
|
||||||
|
conduit \
|
||||||
|
"${ATTIC_ENDPOINT:-https://nix.computer.surgery/conduit}" \
|
||||||
|
"$ATTIC_TOKEN"
|
||||||
|
|
||||||
|
# Push the target installable and its build dependencies
|
||||||
|
nix run --inputs-from . attic -- \
|
||||||
|
push \
|
||||||
|
conduit \
|
||||||
|
"$(nix path-info "$INSTALLABLE" --derivation)" \
|
||||||
|
"$(nix path-info "$INSTALLABLE")"
|
||||||
|
else
|
||||||
|
echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
|
||||||
|
fi
|
18
book.toml
Normal file
18
book.toml
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
[book]
|
||||||
|
title = "Conduit"
|
||||||
|
description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol"
|
||||||
|
language = "en"
|
||||||
|
multilingual = false
|
||||||
|
src = "docs"
|
||||||
|
|
||||||
|
[build]
|
||||||
|
build-dir = "public"
|
||||||
|
create-missing = true
|
||||||
|
|
||||||
|
[output.html]
|
||||||
|
git-repository-url = "https://gitlab.com/famedly/conduit"
|
||||||
|
edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}"
|
||||||
|
git-repository-icon = "fa-git-square"
|
||||||
|
|
||||||
|
[output.html.search]
|
||||||
|
limit-results = 15
|
|
@ -1,26 +1,30 @@
|
||||||
# For use in our CI only. This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit
|
FROM rust:1.75.0
|
||||||
FROM registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:commit-16a08e9b as builder
|
|
||||||
#FROM rust:latest as builder
|
|
||||||
|
|
||||||
WORKDIR /workdir
|
WORKDIR /workdir
|
||||||
|
|
||||||
ARG RUSTC_WRAPPER
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
ARG AWS_ACCESS_KEY_ID
|
libclang-dev
|
||||||
ARG AWS_SECRET_ACCESS_KEY
|
|
||||||
ARG SCCACHE_BUCKET
|
|
||||||
ARG SCCACHE_ENDPOINT
|
|
||||||
ARG SCCACHE_S3_USE_SSL
|
|
||||||
|
|
||||||
COPY . .
|
COPY Cargo.toml Cargo.toml
|
||||||
RUN mkdir -p target/release
|
COPY Cargo.lock Cargo.lock
|
||||||
RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release
|
COPY src src
|
||||||
|
RUN cargo build --release \
|
||||||
## Actual image
|
&& mv target/release/conduit conduit \
|
||||||
FROM debian:bullseye
|
&& rm -rf target
|
||||||
WORKDIR /workdir
|
|
||||||
|
|
||||||
# Install caddy
|
# Install caddy
|
||||||
RUN apt-get update && apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-testing.list && apt-get update && apt-get install -y caddy
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y \
|
||||||
|
debian-keyring \
|
||||||
|
debian-archive-keyring \
|
||||||
|
apt-transport-https \
|
||||||
|
curl \
|
||||||
|
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
|
||||||
|
| gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
|
||||||
|
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
|
||||||
|
| tee /etc/apt/sources.list.d/caddy-testing.list \
|
||||||
|
&& apt-get update \
|
||||||
|
&& apt-get install -y caddy
|
||||||
|
|
||||||
COPY conduit-example.toml conduit.toml
|
COPY conduit-example.toml conduit.toml
|
||||||
COPY complement/caddy.json caddy.json
|
COPY complement/caddy.json caddy.json
|
||||||
|
@ -29,16 +33,9 @@ ENV SERVER_NAME=localhost
|
||||||
ENV CONDUIT_CONFIG=/workdir/conduit.toml
|
ENV CONDUIT_CONFIG=/workdir/conduit.toml
|
||||||
|
|
||||||
RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
|
RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
|
||||||
RUN echo "allow_federation = true" >> conduit.toml
|
|
||||||
RUN echo "allow_check_for_updates = true" >> conduit.toml
|
|
||||||
RUN echo "allow_encryption = true" >> conduit.toml
|
|
||||||
RUN echo "allow_registration = true" >> conduit.toml
|
|
||||||
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
|
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
|
||||||
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
|
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
|
||||||
|
|
||||||
COPY --from=builder /workdir/target/release/conduit /workdir/conduit
|
|
||||||
RUN chmod +x /workdir/conduit
|
|
||||||
|
|
||||||
EXPOSE 8008 8448
|
EXPOSE 8008 8448
|
||||||
|
|
||||||
CMD uname -a && \
|
CMD uname -a && \
|
||||||
|
|
|
@ -1,13 +1,11 @@
|
||||||
# Running Conduit on Complement
|
# Complement
|
||||||
|
|
||||||
This assumes that you're familiar with complement, if not, please readme
|
## What's that?
|
||||||
[their readme](https://github.com/matrix-org/complement#running).
|
|
||||||
|
|
||||||
Complement works with "base images", this directory (and Dockerfile) helps build the conduit complement-ready docker
|
Have a look at [its repository](https://github.com/matrix-org/complement).
|
||||||
image.
|
|
||||||
|
|
||||||
To build, `cd` to the base directory of the workspace, and run this:
|
## How do I use it with Conduit?
|
||||||
|
|
||||||
`docker build -t complement-conduit:dev -f complement/Dockerfile .`
|
The script at [`../bin/complement`](../bin/complement) has automation for this.
|
||||||
|
It takes a few command line arguments, you can read the script to find out what
|
||||||
Then use `complement-conduit:dev` as a base image for running complement tests.
|
those are.
|
||||||
|
|
|
@ -51,7 +51,11 @@ enable_lightning_bolt = true
|
||||||
trusted_servers = ["matrix.org"]
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
|
||||||
|
# Controls the log verbosity. See also [here][0].
|
||||||
|
#
|
||||||
|
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
|
||||||
|
#log = "..."
|
||||||
|
|
||||||
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
||||||
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
||||||
|
|
2
debian/README.md
vendored
2
debian/README.md
vendored
|
@ -5,7 +5,7 @@ Installation
|
||||||
------------
|
------------
|
||||||
|
|
||||||
Information about downloading, building and deploying the Debian package, see
|
Information about downloading, building and deploying the Debian package, see
|
||||||
the "Installing Conduit" section in [DEPLOY.md](../DEPLOY.md).
|
the "Installing Conduit" section in the Deploying docs.
|
||||||
All following sections until "Setting up the Reverse Proxy" be ignored because
|
All following sections until "Setting up the Reverse Proxy" be ignored because
|
||||||
this is handled automatically by the packaging.
|
this is handled automatically by the packaging.
|
||||||
|
|
||||||
|
|
1
debian/postinst
vendored
1
debian/postinst
vendored
|
@ -78,7 +78,6 @@ allow_check_for_updates = true
|
||||||
trusted_servers = ["matrix.org"]
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
|
||||||
EOF
|
EOF
|
||||||
fi
|
fi
|
||||||
;;
|
;;
|
||||||
|
|
10
default.nix
Normal file
10
default.nix
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
(import
|
||||||
|
(
|
||||||
|
let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
|
||||||
|
fetchTarball {
|
||||||
|
url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
|
||||||
|
sha256 = lock.nodes.flake-compat.locked.narHash;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
{ src = ./.; }
|
||||||
|
).defaultNix
|
12
docs/SUMMARY.md
Normal file
12
docs/SUMMARY.md
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
# Summary
|
||||||
|
|
||||||
|
- [Introduction](introduction.md)
|
||||||
|
|
||||||
|
- [Example configuration](configuration.md)
|
||||||
|
- [Deploying](deploying.md)
|
||||||
|
- [Generic](deploying/generic.md)
|
||||||
|
- [Debian](deploying/debian.md)
|
||||||
|
- [Docker](deploying/docker.md)
|
||||||
|
- [NixOS](deploying/nixos.md)
|
||||||
|
- [TURN](turn.md)
|
||||||
|
- [Appservices](appservices.md)
|
5
docs/configuration.md
Normal file
5
docs/configuration.md
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
# Example configuration
|
||||||
|
|
||||||
|
``` toml
|
||||||
|
{{#include ../conduit-example.toml}}
|
||||||
|
```
|
3
docs/deploying.md
Normal file
3
docs/deploying.md
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
# Deploying
|
||||||
|
|
||||||
|
This chapter describes various ways to deploy Conduit.
|
1
docs/deploying/debian.md
Normal file
1
docs/deploying/debian.md
Normal file
|
@ -0,0 +1 @@
|
||||||
|
{{#include ../../debian/README.md}}
|
|
@ -32,7 +32,6 @@ services:
|
||||||
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
||||||
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
||||||
#CONDUIT_LOG: warn,rocket=off,_=off,sled=off
|
|
||||||
CONDUIT_ADDRESS: 0.0.0.0
|
CONDUIT_ADDRESS: 0.0.0.0
|
||||||
CONDUIT_CONFIG: '' # Ignore this
|
CONDUIT_CONFIG: '' # Ignore this
|
||||||
|
|
|
@ -33,7 +33,6 @@ services:
|
||||||
# CONDUIT_PORT: 6167
|
# CONDUIT_PORT: 6167
|
||||||
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
||||||
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
||||||
# CONDUIT_LOG: info # default is: "warn,_=off,sled=off"
|
|
||||||
# CONDUIT_ALLOW_JAEGER: 'false'
|
# CONDUIT_ALLOW_JAEGER: 'false'
|
||||||
# CONDUIT_ALLOW_ENCRYPTION: 'true'
|
# CONDUIT_ALLOW_ENCRYPTION: 'true'
|
||||||
# CONDUIT_ALLOW_FEDERATION: 'true'
|
# CONDUIT_ALLOW_FEDERATION: 'true'
|
||||||
|
@ -95,4 +94,4 @@ volumes:
|
||||||
acme:
|
acme:
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
proxy:
|
proxy:
|
|
@ -32,7 +32,6 @@ services:
|
||||||
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
||||||
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
||||||
#CONDUIT_LOG: warn,rocket=off,_=off,sled=off
|
|
||||||
CONDUIT_ADDRESS: 0.0.0.0
|
CONDUIT_ADDRESS: 0.0.0.0
|
||||||
CONDUIT_CONFIG: '' # Ignore this
|
CONDUIT_CONFIG: '' # Ignore this
|
||||||
#
|
#
|
|
@ -1,4 +1,4 @@
|
||||||
# Deploy using Docker
|
# Conduit for Docker
|
||||||
|
|
||||||
> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.
|
> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.
|
||||||
|
|
||||||
|
@ -64,13 +64,12 @@ docker run -d -p 8448:6167 \
|
||||||
-e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
|
-e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
|
||||||
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
|
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
|
||||||
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
|
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
|
||||||
-e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \
|
|
||||||
--name conduit <link>
|
--name conduit <link>
|
||||||
```
|
```
|
||||||
|
|
||||||
or you can use [docker-compose](#docker-compose).
|
or you can use [docker-compose](#docker-compose).
|
||||||
|
|
||||||
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
|
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../configuration.md).
|
||||||
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
|
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
|
||||||
to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.
|
to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.
|
||||||
|
|
||||||
|
@ -88,8 +87,7 @@ Depending on your proxy setup, you can use one of the following files;
|
||||||
When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and
|
When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and
|
||||||
rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want
|
rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want
|
||||||
for your server.
|
for your server.
|
||||||
|
Additional info about deploying Conduit can be found [here](generic.md).
|
||||||
Additional info about deploying Conduit can be found [here](../DEPLOY.md).
|
|
||||||
|
|
||||||
### Build
|
### Build
|
||||||
|
|
||||||
|
@ -131,7 +129,7 @@ So...step by step:
|
||||||
1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
||||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename.
|
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename.
|
||||||
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
|
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
|
||||||
3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
|
3. Create the `conduit.toml` config file, an example can be found [here](../configuration.md), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
|
||||||
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
|
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
|
||||||
5. Create the files needed by the `well-known` service.
|
5. Create the files needed by the `well-known` service.
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# Deploying Conduit
|
# Generic deployment documentation
|
||||||
|
|
||||||
> ## Getting help
|
> ## Getting help
|
||||||
>
|
>
|
||||||
|
@ -12,11 +12,13 @@ only offer Linux binaries.
|
||||||
|
|
||||||
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url:
|
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url:
|
||||||
|
|
||||||
| CPU Architecture | Download stable version | Download development version |
|
**Stable versions:**
|
||||||
| ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- |
|
|
||||||
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | [Binary][x84_64-glibc-next] / [.deb][x84_64-glibc-next-deb] |
|
| CPU Architecture | Download stable version |
|
||||||
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] |
|
| ------------------------------------------- | --------------------------------------------------------------- |
|
||||||
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] |
|
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] |
|
||||||
|
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] |
|
||||||
|
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] |
|
||||||
|
|
||||||
These builds were created on and linked against the glibc version shipped with Debian bullseye.
|
These builds were created on and linked against the glibc version shipped with Debian bullseye.
|
||||||
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.
|
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.
|
||||||
|
@ -24,15 +26,19 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to
|
||||||
[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
|
[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
|
||||||
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
|
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
|
||||||
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
|
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
|
||||||
[x84_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next
|
|
||||||
[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next
|
|
||||||
[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next
|
|
||||||
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
|
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
|
||||||
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
|
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
|
||||||
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
|
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
|
||||||
[x84_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next
|
|
||||||
[armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next
|
**Latest versions:**
|
||||||
[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next
|
|
||||||
|
| Target | Type | Download |
|
||||||
|
|-|-|-|
|
||||||
|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
|
||||||
|
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
|
||||||
|
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
|
||||||
|
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |
|
||||||
|
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=artifacts) |
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
|
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
|
||||||
|
@ -53,26 +59,6 @@ Then, `cd` into the source tree of conduit-next and run:
|
||||||
$ cargo build --release
|
$ cargo build --release
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to cross compile Conduit to another architecture, read the guide below.
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>Cross compilation</summary>
|
|
||||||
|
|
||||||
The easiest way to compile Conduit for another platform is with [cross-rs](https://github.com/cross-rs/cross), so install it first.
|
|
||||||
|
|
||||||
In order to use RocksDB as the storage backend, append `-latomic` to the linker flags.
|
|
||||||
|
|
||||||
For example, to build a binary for the Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as the compilation
|
|
||||||
target.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
git clone https://gitlab.com/famedly/conduit.git
|
|
||||||
cd conduit
|
|
||||||
export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc'
|
|
||||||
cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf
|
|
||||||
```
|
|
||||||
</details>
|
|
||||||
|
|
||||||
## Adding a Conduit user
|
## Adding a Conduit user
|
||||||
|
|
||||||
While Conduit can run as any user, it is usually better to use dedicated users for different services. This also allows
|
While Conduit can run as any user, it is usually better to use dedicated users for different services. This also allows
|
||||||
|
@ -133,57 +119,12 @@ $ sudo systemctl daemon-reload
|
||||||
|
|
||||||
## Creating the Conduit configuration file
|
## Creating the Conduit configuration file
|
||||||
|
|
||||||
Now we need to create Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
|
Now we need to create Conduit's config file in
|
||||||
to read it. You need to change at least the server name.**
|
`/etc/matrix-conduit/conduit.toml`. Paste in the contents of
|
||||||
|
[`conduit-example.toml`](../configuration.md) **and take a moment to read it.
|
||||||
|
You need to change at least the server name.**
|
||||||
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.
|
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.
|
||||||
|
|
||||||
```toml
|
|
||||||
[global]
|
|
||||||
# The server_name is the pretty name of this server. It is used as a suffix for user
|
|
||||||
# and room ids. Examples: matrix.org, conduit.rs
|
|
||||||
|
|
||||||
# The Conduit server needs all /_matrix/ requests to be reachable at
|
|
||||||
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
|
|
||||||
|
|
||||||
# If that's not possible for you, you can create /.well-known files to redirect
|
|
||||||
# requests. See
|
|
||||||
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
|
||||||
# and
|
|
||||||
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
|
||||||
# for more information
|
|
||||||
|
|
||||||
# YOU NEED TO EDIT THIS
|
|
||||||
#server_name = "your.server.name"
|
|
||||||
|
|
||||||
# This is the only directory where Conduit will save its data
|
|
||||||
database_path = "/var/lib/matrix-conduit/"
|
|
||||||
database_backend = "rocksdb"
|
|
||||||
|
|
||||||
# The port Conduit will be running on. You need to set up a reverse proxy in
|
|
||||||
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
|
||||||
# 443 and 8448 will be forwarded to the Conduit instance running on this port
|
|
||||||
# Docker users: Don't change this, you'll need to map an external port to this.
|
|
||||||
port = 6167
|
|
||||||
|
|
||||||
# Max size for uploads
|
|
||||||
max_request_size = 20_000_000 # in bytes
|
|
||||||
|
|
||||||
# Enables registration. If set to false, no users can register on this server.
|
|
||||||
allow_registration = true
|
|
||||||
|
|
||||||
allow_federation = true
|
|
||||||
allow_check_for_updates = true
|
|
||||||
|
|
||||||
# Server to get public keys from. You probably shouldn't change this
|
|
||||||
trusted_servers = ["matrix.org"]
|
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
|
||||||
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
|
||||||
|
|
||||||
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
|
||||||
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
|
||||||
```
|
|
||||||
|
|
||||||
## Setting the correct file permissions
|
## Setting the correct file permissions
|
||||||
|
|
||||||
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
|
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
|
||||||
|
@ -273,7 +214,7 @@ server {
|
||||||
client_max_body_size 20M;
|
client_max_body_size 20M;
|
||||||
|
|
||||||
location /_matrix/ {
|
location /_matrix/ {
|
||||||
proxy_pass http://127.0.0.1:6167$request_uri;
|
proxy_pass http://127.0.0.1:6167;
|
||||||
proxy_set_header Host $http_host;
|
proxy_set_header Host $http_host;
|
||||||
proxy_buffering off;
|
proxy_buffering off;
|
||||||
proxy_read_timeout 5m;
|
proxy_read_timeout 5m;
|
||||||
|
@ -326,7 +267,7 @@ $ sudo systemctl enable conduit
|
||||||
|
|
||||||
## How do I know it works?
|
## How do I know it works?
|
||||||
|
|
||||||
You can open <https://app.element.io>, enter your homeserver and try to register.
|
You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your homeserver and try to register. If you are using a registration token, use [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/), as they support this feature.
|
||||||
|
|
||||||
You can also use these commands as a quick health check.
|
You can also use these commands as a quick health check.
|
||||||
|
|
||||||
|
@ -344,8 +285,8 @@ $ curl https://your.server.name:8448/_matrix/client/versions
|
||||||
|
|
||||||
## Audio/Video calls
|
## Audio/Video calls
|
||||||
|
|
||||||
For Audio/Video call functionality see the [TURN Guide](TURN.md).
|
For Audio/Video call functionality see the [TURN Guide](../turn.md).
|
||||||
|
|
||||||
## Appservices
|
## Appservices
|
||||||
|
|
||||||
If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md).
|
If you want to set up an appservice, take a look at the [Appservice Guide](../appservices.md).
|
18
docs/deploying/nixos.md
Normal file
18
docs/deploying/nixos.md
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
# Conduit for NixOS
|
||||||
|
|
||||||
|
Conduit can be acquired by Nix from various places:
|
||||||
|
|
||||||
|
* The `flake.nix` at the root of the repo
|
||||||
|
* The `default.nix` at the root of the repo
|
||||||
|
* From Nixpkgs
|
||||||
|
|
||||||
|
The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so
|
||||||
|
(for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to
|
||||||
|
configure Conduit.
|
||||||
|
|
||||||
|
If you want to run the latest code, you should get Conduit from the `flake.nix`
|
||||||
|
or `default.nix` and set [`services.matrix-conduit.package`][package]
|
||||||
|
appropriately.
|
||||||
|
|
||||||
|
[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
|
||||||
|
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
|
13
docs/introduction.md
Normal file
13
docs/introduction.md
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
# Conduit
|
||||||
|
|
||||||
|
{{#include ../README.md:catchphrase}}
|
||||||
|
|
||||||
|
{{#include ../README.md:body}}
|
||||||
|
|
||||||
|
#### How can I deploy my own?
|
||||||
|
|
||||||
|
- [Deployment options](deploying.md)
|
||||||
|
|
||||||
|
If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md).
|
||||||
|
|
||||||
|
{{#include ../README.md:footer}}
|
|
@ -22,4 +22,4 @@ turn_secret = "ADD SECRET HERE"
|
||||||
|
|
||||||
## Apply settings
|
## Apply settings
|
||||||
|
|
||||||
Restart Conduit.
|
Restart Conduit.
|
74
engage.toml
Normal file
74
engage.toml
Normal file
|
@ -0,0 +1,74 @@
|
||||||
|
interpreter = ["bash", "-euo", "pipefail", "-c"]
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "engage"
|
||||||
|
group = "versions"
|
||||||
|
script = "engage --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "rustc"
|
||||||
|
group = "versions"
|
||||||
|
script = "rustc --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo"
|
||||||
|
group = "versions"
|
||||||
|
script = "cargo --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-fmt"
|
||||||
|
group = "versions"
|
||||||
|
script = "cargo fmt --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "rustdoc"
|
||||||
|
group = "versions"
|
||||||
|
script = "rustdoc --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-clippy"
|
||||||
|
group = "versions"
|
||||||
|
script = "cargo clippy -- --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "lychee"
|
||||||
|
group = "versions"
|
||||||
|
script = "lychee --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-fmt"
|
||||||
|
group = "lints"
|
||||||
|
script = "cargo fmt --check -- --color=always"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-doc"
|
||||||
|
group = "lints"
|
||||||
|
script = """
|
||||||
|
RUSTDOCFLAGS="-D warnings" cargo doc \
|
||||||
|
--workspace \
|
||||||
|
--no-deps \
|
||||||
|
--document-private-items \
|
||||||
|
--color always
|
||||||
|
"""
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-clippy"
|
||||||
|
group = "lints"
|
||||||
|
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "lychee"
|
||||||
|
group = "lints"
|
||||||
|
script = "lychee --offline docs"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo"
|
||||||
|
group = "tests"
|
||||||
|
script = """
|
||||||
|
cargo test \
|
||||||
|
--workspace \
|
||||||
|
--all-targets \
|
||||||
|
--color=always \
|
||||||
|
-- \
|
||||||
|
--color=always
|
||||||
|
"""
|
206
flake.lock
generated
206
flake.lock
generated
|
@ -1,22 +1,41 @@
|
||||||
{
|
{
|
||||||
"nodes": {
|
"nodes": {
|
||||||
"crane": {
|
"attic": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
|
"crane": "crane",
|
||||||
"flake-compat": "flake-compat",
|
"flake-compat": "flake-compat",
|
||||||
"flake-utils": [
|
"flake-utils": "flake-utils",
|
||||||
"flake-utils"
|
"nixpkgs": "nixpkgs",
|
||||||
],
|
"nixpkgs-stable": "nixpkgs-stable"
|
||||||
"nixpkgs": [
|
|
||||||
"nixpkgs"
|
|
||||||
],
|
|
||||||
"rust-overlay": "rust-overlay"
|
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1688772518,
|
"lastModified": 1707922053,
|
||||||
"narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=",
|
"narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=",
|
||||||
|
"owner": "zhaofengli",
|
||||||
|
"repo": "attic",
|
||||||
|
"rev": "6eabc3f02fae3683bffab483e614bebfcd476b21",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "zhaofengli",
|
||||||
|
"ref": "main",
|
||||||
|
"repo": "attic",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"crane": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"attic",
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1702918879,
|
||||||
|
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
|
||||||
"owner": "ipetkov",
|
"owner": "ipetkov",
|
||||||
"repo": "crane",
|
"repo": "crane",
|
||||||
"rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e",
|
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -25,6 +44,27 @@
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"crane_2": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1707685877,
|
||||||
|
"narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=",
|
||||||
|
"owner": "ipetkov",
|
||||||
|
"repo": "crane",
|
||||||
|
"rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "ipetkov",
|
||||||
|
"repo": "crane",
|
||||||
|
"rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
"fenix": {
|
"fenix": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"nixpkgs": [
|
"nixpkgs": [
|
||||||
|
@ -33,11 +73,11 @@
|
||||||
"rust-analyzer-src": "rust-analyzer-src"
|
"rust-analyzer-src": "rust-analyzer-src"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1689488573,
|
"lastModified": 1709619709,
|
||||||
"narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=",
|
"narHash": "sha256-l6EPVJfwfelWST7qWQeP6t/TDK3HHv5uUB1b2vw4mOQ=",
|
||||||
"owner": "nix-community",
|
"owner": "nix-community",
|
||||||
"repo": "fenix",
|
"repo": "fenix",
|
||||||
"rev": "39096fe3f379036ff4a5fa198950b8e79defe939",
|
"rev": "c8943ea9e98d41325ff57d4ec14736d330b321b2",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -62,16 +102,29 @@
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"flake-utils": {
|
"flake-compat_2": {
|
||||||
"inputs": {
|
"flake": false,
|
||||||
"systems": "systems"
|
|
||||||
},
|
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1689068808,
|
"lastModified": 1696426674,
|
||||||
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
|
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1667395993,
|
||||||
|
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
||||||
"owner": "numtide",
|
"owner": "numtide",
|
||||||
"repo": "flake-utils",
|
"repo": "flake-utils",
|
||||||
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
|
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -80,13 +133,78 @@
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"flake-utils_2": {
|
||||||
|
"inputs": {
|
||||||
|
"systems": "systems"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1709126324,
|
||||||
|
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nix-filter": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1705332318,
|
||||||
|
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "nix-filter",
|
||||||
|
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "nix-filter",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1689444953,
|
"lastModified": 1702539185,
|
||||||
"narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=",
|
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "8acef304efe70152463a6399f73e636bcc363813",
|
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixpkgs-unstable",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs-stable": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1702780907,
|
||||||
|
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixos-23.11",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs_2": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1709479366,
|
||||||
|
"narHash": "sha256-n6F0n8UV6lnTZbYPl1A9q1BS0p4hduAv1mGAP17CVd0=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "b8697e57f10292a6165a20f03d2f42920dfaf973",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -98,20 +216,23 @@
|
||||||
},
|
},
|
||||||
"root": {
|
"root": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"crane": "crane",
|
"attic": "attic",
|
||||||
|
"crane": "crane_2",
|
||||||
"fenix": "fenix",
|
"fenix": "fenix",
|
||||||
"flake-utils": "flake-utils",
|
"flake-compat": "flake-compat_2",
|
||||||
"nixpkgs": "nixpkgs"
|
"flake-utils": "flake-utils_2",
|
||||||
|
"nix-filter": "nix-filter",
|
||||||
|
"nixpkgs": "nixpkgs_2"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"rust-analyzer-src": {
|
"rust-analyzer-src": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1689441253,
|
"lastModified": 1709571018,
|
||||||
"narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=",
|
"narHash": "sha256-ISFrxHxE0J5g7lDAscbK88hwaT5uewvWoma9TlFmRzM=",
|
||||||
"owner": "rust-lang",
|
"owner": "rust-lang",
|
||||||
"repo": "rust-analyzer",
|
"repo": "rust-analyzer",
|
||||||
"rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8",
|
"rev": "9f14343f9ee24f53f17492c5f9b653427e2ad15e",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -121,31 +242,6 @@
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"rust-overlay": {
|
|
||||||
"inputs": {
|
|
||||||
"flake-utils": [
|
|
||||||
"crane",
|
|
||||||
"flake-utils"
|
|
||||||
],
|
|
||||||
"nixpkgs": [
|
|
||||||
"crane",
|
|
||||||
"nixpkgs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1688351637,
|
|
||||||
"narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=",
|
|
||||||
"owner": "oxalica",
|
|
||||||
"repo": "rust-overlay",
|
|
||||||
"rev": "f9b92316727af9e6c7fee4a761242f7f46880329",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "oxalica",
|
|
||||||
"repo": "rust-overlay",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"systems": {
|
"systems": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1681028828,
|
"lastModified": 1681028828,
|
||||||
|
|
322
flake.nix
322
flake.nix
|
@ -2,92 +2,316 @@
|
||||||
inputs = {
|
inputs = {
|
||||||
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
|
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
|
||||||
flake-utils.url = "github:numtide/flake-utils";
|
flake-utils.url = "github:numtide/flake-utils";
|
||||||
|
nix-filter.url = "github:numtide/nix-filter";
|
||||||
|
flake-compat = {
|
||||||
|
url = "github:edolstra/flake-compat";
|
||||||
|
flake = false;
|
||||||
|
};
|
||||||
|
|
||||||
fenix = {
|
fenix = {
|
||||||
url = "github:nix-community/fenix";
|
url = "github:nix-community/fenix";
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
};
|
};
|
||||||
crane = {
|
crane = {
|
||||||
url = "github:ipetkov/crane";
|
# Pin latest crane that's not affected by the following bugs:
|
||||||
|
#
|
||||||
|
# * <https://github.com/ipetkov/crane/issues/527#issuecomment-1978079140>
|
||||||
|
# * <https://github.com/toml-rs/toml/issues/691>
|
||||||
|
# * <https://github.com/toml-rs/toml/issues/267>
|
||||||
|
url = "github:ipetkov/crane?rev=2c653e4478476a52c6aa3ac0495e4dea7449ea0e";
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
inputs.flake-utils.follows = "flake-utils";
|
|
||||||
};
|
};
|
||||||
|
attic.url = "github:zhaofengli/attic?ref=main";
|
||||||
};
|
};
|
||||||
|
|
||||||
outputs =
|
outputs =
|
||||||
{ self
|
{ self
|
||||||
, nixpkgs
|
, nixpkgs
|
||||||
, flake-utils
|
, flake-utils
|
||||||
|
, nix-filter
|
||||||
|
|
||||||
, fenix
|
, fenix
|
||||||
, crane
|
, crane
|
||||||
|
, ...
|
||||||
}: flake-utils.lib.eachDefaultSystem (system:
|
}: flake-utils.lib.eachDefaultSystem (system:
|
||||||
let
|
let
|
||||||
pkgs = nixpkgs.legacyPackages.${system};
|
pkgsHost = nixpkgs.legacyPackages.${system};
|
||||||
|
|
||||||
# Use mold on Linux
|
|
||||||
stdenv = if pkgs.stdenv.isLinux then
|
|
||||||
pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv
|
|
||||||
else
|
|
||||||
pkgs.stdenv;
|
|
||||||
|
|
||||||
# Nix-accessible `Cargo.toml`
|
# Nix-accessible `Cargo.toml`
|
||||||
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
|
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
|
||||||
|
|
||||||
# The Rust toolchain to use
|
# The Rust toolchain to use
|
||||||
toolchain = fenix.packages.${system}.toolchainOf {
|
toolchain = fenix.packages.${system}.fromToolchainFile {
|
||||||
# Use the Rust version defined in `Cargo.toml`
|
file = ./rust-toolchain.toml;
|
||||||
channel = cargoToml.package.rust-version;
|
|
||||||
|
|
||||||
# THE rust-version HASH
|
# See also `rust-toolchain.toml`
|
||||||
sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc=";
|
sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8=";
|
||||||
};
|
};
|
||||||
|
|
||||||
# The system's RocksDB
|
builder = pkgs:
|
||||||
ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include";
|
((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;
|
||||||
ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
|
|
||||||
|
|
||||||
# Shared between the package and the devShell
|
nativeBuildInputs = pkgs: [
|
||||||
nativeBuildInputs = (with pkgs.rustPlatform; [
|
# bindgen needs the build platform's libclang. Apparently due to
|
||||||
bindgenHook
|
# "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
|
||||||
]);
|
# quite do the right thing here.
|
||||||
|
pkgs.pkgsBuildHost.rustPlatform.bindgenHook
|
||||||
|
];
|
||||||
|
|
||||||
builder =
|
rocksdb' = pkgs:
|
||||||
((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage;
|
let
|
||||||
|
version = "8.11.3";
|
||||||
|
in
|
||||||
|
pkgs.rocksdb.overrideAttrs (old: {
|
||||||
|
inherit version;
|
||||||
|
src = pkgs.fetchFromGitHub {
|
||||||
|
owner = "facebook";
|
||||||
|
repo = "rocksdb";
|
||||||
|
rev = "v${version}";
|
||||||
|
hash = "sha256-OpEiMwGxZuxb9o3RQuSrwZMQGLhe9xLT1aa3HpI4KPs=";
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
env = pkgs: {
|
||||||
|
CONDUIT_VERSION_EXTRA = self.shortRev or self.dirtyShortRev;
|
||||||
|
ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include";
|
||||||
|
ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib";
|
||||||
|
}
|
||||||
|
// pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
|
||||||
|
ROCKSDB_STATIC = "";
|
||||||
|
}
|
||||||
|
// {
|
||||||
|
CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
|
||||||
|
lib.concatStringsSep " " ([]
|
||||||
|
++ lib.optionals
|
||||||
|
# This disables PIE for static builds, which isn't great in terms
|
||||||
|
# of security. Unfortunately, my hand is forced because nixpkgs'
|
||||||
|
# `libstdc++.a` is built without `-fPIE`, which precludes us from
|
||||||
|
# leaving PIE enabled.
|
||||||
|
stdenv.hostPlatform.isStatic
|
||||||
|
["-C" "relocation-model=static"]
|
||||||
|
++ lib.optionals
|
||||||
|
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
|
||||||
|
["-l" "c"]
|
||||||
|
++ lib.optionals
|
||||||
|
# This check has to match the one [here][0]. We only need to set
|
||||||
|
# these flags when using a different linker. Don't ask me why,
|
||||||
|
# though, because I don't know. All I know is it breaks otherwise.
|
||||||
|
#
|
||||||
|
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
|
||||||
|
(
|
||||||
|
# Nixpkgs doesn't check for x86_64 here but we do, because I
|
||||||
|
# observed a failure building statically for x86_64 without
|
||||||
|
# including it here. Linkers are weird.
|
||||||
|
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
|
||||||
|
&& stdenv.hostPlatform.isStatic
|
||||||
|
&& !stdenv.isDarwin
|
||||||
|
&& !stdenv.cc.bintools.isLLVM
|
||||||
|
)
|
||||||
|
[
|
||||||
|
"-l"
|
||||||
|
"stdc++"
|
||||||
|
"-L"
|
||||||
|
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
|
||||||
|
]
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
# What follows is stolen from [here][0]. Its purpose is to properly
|
||||||
|
# configure compilers and linkers for various stages of the build, and
|
||||||
|
# even covers the case of build scripts that need native code compiled and
|
||||||
|
# run on the build platform (I think).
|
||||||
|
#
|
||||||
|
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
|
||||||
|
// (
|
||||||
|
let
|
||||||
|
inherit (pkgs.rust.lib) envVars;
|
||||||
|
in
|
||||||
|
pkgs.lib.optionalAttrs
|
||||||
|
(pkgs.stdenv.targetPlatform.rust.rustcTarget
|
||||||
|
!= pkgs.stdenv.hostPlatform.rust.rustcTarget)
|
||||||
|
(
|
||||||
|
let
|
||||||
|
inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
|
||||||
|
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
|
||||||
|
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
|
||||||
|
envVars.linkerForTarget;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
// (
|
||||||
|
let
|
||||||
|
inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
|
||||||
|
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
|
||||||
|
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
|
||||||
|
CARGO_BUILD_TARGET = rustcTarget;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
// (
|
||||||
|
let
|
||||||
|
inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
|
||||||
|
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
|
||||||
|
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
|
||||||
|
HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc";
|
||||||
|
HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++";
|
||||||
|
}
|
||||||
|
));
|
||||||
|
|
||||||
|
package = pkgs: builder pkgs {
|
||||||
|
src = nix-filter {
|
||||||
|
root = ./.;
|
||||||
|
include = [
|
||||||
|
"src"
|
||||||
|
"Cargo.toml"
|
||||||
|
"Cargo.lock"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
# This is redundant with CI
|
||||||
|
doCheck = false;
|
||||||
|
|
||||||
|
env = env pkgs;
|
||||||
|
nativeBuildInputs = nativeBuildInputs pkgs;
|
||||||
|
|
||||||
|
meta.mainProgram = cargoToml.package.name;
|
||||||
|
};
|
||||||
|
|
||||||
|
mkOciImage = pkgs: package:
|
||||||
|
pkgs.dockerTools.buildImage {
|
||||||
|
name = package.pname;
|
||||||
|
tag = "next";
|
||||||
|
copyToRoot = [
|
||||||
|
pkgs.dockerTools.caCertificates
|
||||||
|
];
|
||||||
|
config = {
|
||||||
|
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
|
||||||
|
# are handled as expected
|
||||||
|
Entrypoint = [
|
||||||
|
"${pkgs.lib.getExe' pkgs.tini "tini"}"
|
||||||
|
"--"
|
||||||
|
];
|
||||||
|
Cmd = [
|
||||||
|
"${pkgs.lib.getExe package}"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
packages.default = builder {
|
packages = {
|
||||||
src = ./.;
|
default = package pkgsHost;
|
||||||
|
oci-image = mkOciImage pkgsHost self.packages.${system}.default;
|
||||||
|
|
||||||
inherit
|
book =
|
||||||
stdenv
|
let
|
||||||
nativeBuildInputs
|
package = self.packages.${system}.default;
|
||||||
ROCKSDB_INCLUDE_DIR
|
in
|
||||||
ROCKSDB_LIB_DIR;
|
pkgsHost.stdenv.mkDerivation {
|
||||||
};
|
pname = "${package.pname}-book";
|
||||||
|
version = package.version;
|
||||||
|
|
||||||
devShells.default = (pkgs.mkShell.override { inherit stdenv; }) {
|
src = nix-filter {
|
||||||
# Rust Analyzer needs to be able to find the path to default crate
|
root = ./.;
|
||||||
# sources, and it can read this environment variable to do so
|
include = [
|
||||||
RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library";
|
"book.toml"
|
||||||
|
"conduit-example.toml"
|
||||||
|
"README.md"
|
||||||
|
"debian/README.md"
|
||||||
|
"docs"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
inherit
|
nativeBuildInputs = (with pkgsHost; [
|
||||||
ROCKSDB_INCLUDE_DIR
|
mdbook
|
||||||
ROCKSDB_LIB_DIR;
|
]);
|
||||||
|
|
||||||
|
buildPhase = ''
|
||||||
|
mdbook build
|
||||||
|
mv public $out
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
}
|
||||||
|
//
|
||||||
|
builtins.listToAttrs
|
||||||
|
(builtins.concatLists
|
||||||
|
(builtins.map
|
||||||
|
(crossSystem:
|
||||||
|
let
|
||||||
|
binaryName = "static-${crossSystem}";
|
||||||
|
pkgsCrossStatic =
|
||||||
|
(import nixpkgs {
|
||||||
|
inherit system;
|
||||||
|
crossSystem = {
|
||||||
|
config = crossSystem;
|
||||||
|
};
|
||||||
|
}).pkgsStatic;
|
||||||
|
in
|
||||||
|
[
|
||||||
|
# An output for a statically-linked binary
|
||||||
|
{
|
||||||
|
name = binaryName;
|
||||||
|
value = package pkgsCrossStatic;
|
||||||
|
}
|
||||||
|
|
||||||
|
# An output for an OCI image based on that binary
|
||||||
|
{
|
||||||
|
name = "oci-image-${crossSystem}";
|
||||||
|
value = mkOciImage
|
||||||
|
pkgsCrossStatic
|
||||||
|
self.packages.${system}.${binaryName};
|
||||||
|
}
|
||||||
|
]
|
||||||
|
)
|
||||||
|
[
|
||||||
|
"x86_64-unknown-linux-musl"
|
||||||
|
"aarch64-unknown-linux-musl"
|
||||||
|
]
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
devShells.default = pkgsHost.mkShell {
|
||||||
|
env = env pkgsHost // {
|
||||||
|
# Rust Analyzer needs to be able to find the path to default crate
|
||||||
|
# sources, and it can read this environment variable to do so. The
|
||||||
|
# `rust-src` component is required in order for this to work.
|
||||||
|
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
|
||||||
|
};
|
||||||
|
|
||||||
# Development tools
|
# Development tools
|
||||||
nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [
|
nativeBuildInputs = nativeBuildInputs pkgsHost ++ [
|
||||||
cargo
|
# Always use nightly rustfmt because most of its options are unstable
|
||||||
clippy
|
#
|
||||||
rust-src
|
# This needs to come before `toolchain` in this list, otherwise
|
||||||
rustc
|
# `$PATH` will have stable rustfmt instead.
|
||||||
rustfmt
|
fenix.packages.${system}.latest.rustfmt
|
||||||
]);
|
|
||||||
};
|
|
||||||
|
|
||||||
checks = {
|
toolchain
|
||||||
packagesDefault = self.packages.${system}.default;
|
] ++ (with pkgsHost; [
|
||||||
devShellsDefault = self.devShells.${system}.default;
|
engage
|
||||||
|
|
||||||
|
# Needed for producing Debian packages
|
||||||
|
cargo-deb
|
||||||
|
|
||||||
|
# Needed for Complement
|
||||||
|
go
|
||||||
|
olm
|
||||||
|
|
||||||
|
# Needed for our script for Complement
|
||||||
|
jq
|
||||||
|
|
||||||
|
# Needed for finding broken markdown links
|
||||||
|
lychee
|
||||||
|
|
||||||
|
# Useful for editing the book locally
|
||||||
|
mdbook
|
||||||
|
]);
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
198
nix/README.md
198
nix/README.md
|
@ -1,198 +0,0 @@
|
||||||
# Conduit for Nix/NixOS
|
|
||||||
|
|
||||||
This guide assumes you have a recent version of Nix (^2.4) installed.
|
|
||||||
|
|
||||||
Since Conduit ships as a Nix flake, you'll first need to [enable
|
|
||||||
flakes][enable_flakes].
|
|
||||||
|
|
||||||
You can now use the usual Nix commands to interact with Conduit's flake. For
|
|
||||||
example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need
|
|
||||||
to provide configuration and such manually as usual).
|
|
||||||
|
|
||||||
If your NixOS configuration is defined as a flake, you can depend on this flake
|
|
||||||
to provide a more up-to-date version than provided by `nixpkgs`. In your flake,
|
|
||||||
add the following to your `inputs`:
|
|
||||||
|
|
||||||
```nix
|
|
||||||
conduit = {
|
|
||||||
url = "gitlab:famedly/conduit";
|
|
||||||
|
|
||||||
# Assuming you have an input for nixpkgs called `nixpkgs`. If you experience
|
|
||||||
# build failures while using this, try commenting/deleting this line. This
|
|
||||||
# will probably also require you to always build from source.
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
Next, make sure you're passing your flake inputs to the `specialArgs` argument
|
|
||||||
of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will
|
|
||||||
assume you've named the group `flake-inputs`.
|
|
||||||
|
|
||||||
Now you can configure Conduit and a reverse proxy for it. Add the following to
|
|
||||||
a new Nix file and include it in your configuration:
|
|
||||||
|
|
||||||
```nix
|
|
||||||
{ config
|
|
||||||
, pkgs
|
|
||||||
, flake-inputs
|
|
||||||
, ...
|
|
||||||
}:
|
|
||||||
|
|
||||||
let
|
|
||||||
# You'll need to edit these values
|
|
||||||
|
|
||||||
# The hostname that will appear in your user and room IDs
|
|
||||||
server_name = "example.com";
|
|
||||||
|
|
||||||
# The hostname that Conduit actually runs on
|
|
||||||
#
|
|
||||||
# This can be the same as `server_name` if you want. This is only necessary
|
|
||||||
# when Conduit is running on a different machine than the one hosting your
|
|
||||||
# root domain. This configuration also assumes this is all running on a single
|
|
||||||
# machine, some tweaks will need to be made if this is not the case.
|
|
||||||
matrix_hostname = "matrix.${server_name}";
|
|
||||||
|
|
||||||
# An admin email for TLS certificate notifications
|
|
||||||
admin_email = "admin@${server_name}";
|
|
||||||
|
|
||||||
# These ones you can leave alone
|
|
||||||
|
|
||||||
# Build a dervation that stores the content of `${server_name}/.well-known/matrix/server`
|
|
||||||
well_known_server = pkgs.writeText "well-known-matrix-server" ''
|
|
||||||
{
|
|
||||||
"m.server": "${matrix_hostname}"
|
|
||||||
}
|
|
||||||
'';
|
|
||||||
|
|
||||||
# Build a dervation that stores the content of `${server_name}/.well-known/matrix/client`
|
|
||||||
well_known_client = pkgs.writeText "well-known-matrix-client" ''
|
|
||||||
{
|
|
||||||
"m.homeserver": {
|
|
||||||
"base_url": "https://${matrix_hostname}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
'';
|
|
||||||
in
|
|
||||||
|
|
||||||
{
|
|
||||||
# Configure Conduit itself
|
|
||||||
services.matrix-conduit = {
|
|
||||||
enable = true;
|
|
||||||
|
|
||||||
# This causes NixOS to use the flake defined in this repository instead of
|
|
||||||
# the build of Conduit built into nixpkgs.
|
|
||||||
package = flake-inputs.conduit.packages.${pkgs.system}.default;
|
|
||||||
|
|
||||||
settings.global = {
|
|
||||||
inherit server_name;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# Configure automated TLS acquisition/renewal
|
|
||||||
security.acme = {
|
|
||||||
acceptTerms = true;
|
|
||||||
defaults = {
|
|
||||||
email = admin_email;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# ACME data must be readable by the NGINX user
|
|
||||||
users.users.nginx.extraGroups = [
|
|
||||||
"acme"
|
|
||||||
];
|
|
||||||
|
|
||||||
# Configure NGINX as a reverse proxy
|
|
||||||
services.nginx = {
|
|
||||||
enable = true;
|
|
||||||
recommendedProxySettings = true;
|
|
||||||
|
|
||||||
virtualHosts = {
|
|
||||||
"${matrix_hostname}" = {
|
|
||||||
forceSSL = true;
|
|
||||||
enableACME = true;
|
|
||||||
|
|
||||||
listen = [
|
|
||||||
{
|
|
||||||
addr = "0.0.0.0";
|
|
||||||
port = 443;
|
|
||||||
ssl = true;
|
|
||||||
}
|
|
||||||
{
|
|
||||||
addr = "[::]";
|
|
||||||
port = 443;
|
|
||||||
ssl = true;
|
|
||||||
} {
|
|
||||||
addr = "0.0.0.0";
|
|
||||||
port = 8448;
|
|
||||||
ssl = true;
|
|
||||||
}
|
|
||||||
{
|
|
||||||
addr = "[::]";
|
|
||||||
port = 8448;
|
|
||||||
ssl = true;
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
locations."/_matrix/" = {
|
|
||||||
proxyPass = "http://backend_conduit$request_uri";
|
|
||||||
proxyWebsockets = true;
|
|
||||||
extraConfig = ''
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
proxy_buffering off;
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
extraConfig = ''
|
|
||||||
merge_slashes off;
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
"${server_name}" = {
|
|
||||||
forceSSL = true;
|
|
||||||
enableACME = true;
|
|
||||||
|
|
||||||
locations."=/.well-known/matrix/server" = {
|
|
||||||
# Use the contents of the derivation built previously
|
|
||||||
alias = "${well_known_server}";
|
|
||||||
|
|
||||||
extraConfig = ''
|
|
||||||
# Set the header since by default NGINX thinks it's just bytes
|
|
||||||
default_type application/json;
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
locations."=/.well-known/matrix/client" = {
|
|
||||||
# Use the contents of the derivation built previously
|
|
||||||
alias = "${well_known_client}";
|
|
||||||
|
|
||||||
extraConfig = ''
|
|
||||||
# Set the header since by default NGINX thinks it's just bytes
|
|
||||||
default_type application/json;
|
|
||||||
|
|
||||||
# https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients
|
|
||||||
add_header Access-Control-Allow-Origin "*";
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
upstreams = {
|
|
||||||
"backend_conduit" = {
|
|
||||||
servers = {
|
|
||||||
"[::1]:${toString config.services.matrix-conduit.settings.global.port}" = { };
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# Open firewall ports for HTTP, HTTPS, and Matrix federation
|
|
||||||
networking.firewall.allowedTCPPorts = [ 80 443 8448 ];
|
|
||||||
networking.firewall.allowedUDPPorts = [ 80 443 8448 ];
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Now you can rebuild your system configuration and you should be good to go!
|
|
||||||
|
|
||||||
[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes
|
|
||||||
|
|
||||||
[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS
|
|
22
rust-toolchain.toml
Normal file
22
rust-toolchain.toml
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
# This is the authoritiative configuration of this project's Rust toolchain.
|
||||||
|
#
|
||||||
|
# Other files that need upkeep when this changes:
|
||||||
|
#
|
||||||
|
# * `.gitlab-ci.yml`
|
||||||
|
# * `Cargo.toml`
|
||||||
|
# * `flake.nix`
|
||||||
|
#
|
||||||
|
# Search in those files for `rust-toolchain.toml` to find the relevant places.
|
||||||
|
# If you're having trouble making the relevant changes, bug a maintainer.
|
||||||
|
|
||||||
|
[toolchain]
|
||||||
|
channel = "1.75.0"
|
||||||
|
components = [
|
||||||
|
# For rust-analyzer
|
||||||
|
"rust-src",
|
||||||
|
]
|
||||||
|
targets = [
|
||||||
|
"x86_64-unknown-linux-gnu",
|
||||||
|
"x86_64-unknown-linux-musl",
|
||||||
|
"aarch64-unknown-linux-musl",
|
||||||
|
]
|
|
@ -1,23 +1,34 @@
|
||||||
use crate::{services, utils, Error, Result};
|
use crate::{services, utils, Error, Result};
|
||||||
use bytes::BytesMut;
|
use bytes::BytesMut;
|
||||||
use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
|
use ruma::api::{
|
||||||
|
appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken,
|
||||||
|
};
|
||||||
use std::{fmt::Debug, mem, time::Duration};
|
use std::{fmt::Debug, mem, time::Duration};
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
|
/// Sends a request to an appservice
|
||||||
|
///
|
||||||
|
/// Only returns None if there is no url specified in the appservice registration file
|
||||||
#[tracing::instrument(skip(request))]
|
#[tracing::instrument(skip(request))]
|
||||||
pub(crate) async fn send_request<T: OutgoingRequest>(
|
pub(crate) async fn send_request<T: OutgoingRequest>(
|
||||||
registration: serde_yaml::Value,
|
registration: Registration,
|
||||||
request: T,
|
request: T,
|
||||||
) -> Result<T::IncomingResponse>
|
) -> Result<Option<T::IncomingResponse>>
|
||||||
where
|
where
|
||||||
T: Debug,
|
T: Debug,
|
||||||
{
|
{
|
||||||
let destination = registration.get("url").unwrap().as_str().unwrap();
|
let destination = match registration.url {
|
||||||
let hs_token = registration.get("hs_token").unwrap().as_str().unwrap();
|
Some(url) => url,
|
||||||
|
None => {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let hs_token = registration.hs_token.as_str();
|
||||||
|
|
||||||
let mut http_request = request
|
let mut http_request = request
|
||||||
.try_into_http_request::<BytesMut>(
|
.try_into_http_request::<BytesMut>(
|
||||||
destination,
|
&destination,
|
||||||
SendAccessToken::IfRequired(hs_token),
|
SendAccessToken::IfRequired(hs_token),
|
||||||
&[MatrixVersion::V1_0],
|
&[MatrixVersion::V1_0],
|
||||||
)
|
)
|
||||||
|
@ -39,8 +50,7 @@ where
|
||||||
);
|
);
|
||||||
*http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");
|
*http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");
|
||||||
|
|
||||||
let mut reqwest_request = reqwest::Request::try_from(http_request)
|
let mut reqwest_request = reqwest::Request::try_from(http_request)?;
|
||||||
.expect("all http requests are valid reqwest requests");
|
|
||||||
|
|
||||||
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
|
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
|
||||||
|
|
||||||
|
@ -55,9 +65,7 @@ where
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
warn!(
|
warn!(
|
||||||
"Could not send request to appservice {:?} at {}: {}",
|
"Could not send request to appservice {:?} at {}: {}",
|
||||||
registration.get("id"),
|
registration.id, destination, e
|
||||||
destination,
|
|
||||||
e
|
|
||||||
);
|
);
|
||||||
return Err(e.into());
|
return Err(e.into());
|
||||||
}
|
}
|
||||||
|
@ -95,7 +103,8 @@ where
|
||||||
.body(body)
|
.body(body)
|
||||||
.expect("reqwest body is valid http body"),
|
.expect("reqwest body is valid http body"),
|
||||||
);
|
);
|
||||||
response.map_err(|_| {
|
|
||||||
|
response.map(Some).map_err(|_| {
|
||||||
warn!(
|
warn!(
|
||||||
"Appservice returned invalid response bytes {}\n{}",
|
"Appservice returned invalid response bytes {}\n{}",
|
||||||
destination, url
|
destination, url
|
||||||
|
|
|
@ -3,7 +3,8 @@ use crate::{api::client_server, services, utils, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
account::{
|
account::{
|
||||||
change_password, deactivate, get_3pids, get_username_availability, register,
|
change_password, deactivate, get_3pids, get_username_availability,
|
||||||
|
register::{self, LoginType},
|
||||||
request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
|
request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
|
||||||
whoami, ThirdPartyIdRemovalStatus,
|
whoami, ThirdPartyIdRemovalStatus,
|
||||||
},
|
},
|
||||||
|
@ -84,6 +85,13 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if body.body.login_type == Some(LoginType::ApplicationService) && !body.from_appservice {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::MissingToken,
|
||||||
|
"Missing appservice token.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
let is_guest = body.kind == RegistrationKind::Guest;
|
let is_guest = body.kind == RegistrationKind::Guest;
|
||||||
|
|
||||||
let user_id = match (&body.username, is_guest) {
|
let user_id = match (&body.username, is_guest) {
|
||||||
|
@ -239,13 +247,22 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
||||||
|
|
||||||
// If this is the first real user, grant them admin privileges
|
// If this is the first real user, grant them admin privileges
|
||||||
// Note: the server user, @conduit:servername, is generated first
|
// Note: the server user, @conduit:servername, is generated first
|
||||||
if services().users.count()? == 2 {
|
if !is_guest {
|
||||||
services()
|
if let Some(admin_room) = services().admin.get_admin_room()? {
|
||||||
.admin
|
if services()
|
||||||
.make_user_admin(&user_id, displayname)
|
.rooms
|
||||||
.await?;
|
.state_cache
|
||||||
|
.room_joined_count(&admin_room)?
|
||||||
|
== Some(1)
|
||||||
|
{
|
||||||
|
services()
|
||||||
|
.admin
|
||||||
|
.make_user_admin(&user_id, displayname)
|
||||||
|
.await?;
|
||||||
|
|
||||||
warn!("Granting {} admin privileges as the first user", user_id);
|
warn!("Granting {} admin privileges as the first user", user_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(register::v3::Response {
|
Ok(register::v3::Response {
|
||||||
|
|
|
@ -1,6 +1,5 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
use regex::Regex;
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
appservice,
|
appservice,
|
||||||
|
@ -101,31 +100,20 @@ pub(crate) async fn get_alias_helper(
|
||||||
match services().rooms.alias.resolve_local_alias(&room_alias)? {
|
match services().rooms.alias.resolve_local_alias(&room_alias)? {
|
||||||
Some(r) => room_id = Some(r),
|
Some(r) => room_id = Some(r),
|
||||||
None => {
|
None => {
|
||||||
for (_id, registration) in services().appservice.all()? {
|
for appservice in services().appservice.read().await.values() {
|
||||||
let aliases = registration
|
if appservice.aliases.is_match(room_alias.as_str())
|
||||||
.get("namespaces")
|
&& matches!(
|
||||||
.and_then(|ns| ns.get("aliases"))
|
services()
|
||||||
.and_then(|aliases| aliases.as_sequence())
|
.sending
|
||||||
.map_or_else(Vec::new, |aliases| {
|
.send_appservice_request(
|
||||||
aliases
|
appservice.registration.clone(),
|
||||||
.iter()
|
appservice::query::query_room_alias::v1::Request {
|
||||||
.filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok())
|
room_alias: room_alias.clone(),
|
||||||
.collect::<Vec<_>>()
|
},
|
||||||
});
|
)
|
||||||
|
.await,
|
||||||
if aliases
|
Ok(Some(_opt_result))
|
||||||
.iter()
|
)
|
||||||
.any(|aliases| aliases.is_match(room_alias.as_str()))
|
|
||||||
&& services()
|
|
||||||
.sending
|
|
||||||
.send_appservice_request(
|
|
||||||
registration,
|
|
||||||
appservice::query::query_room_alias::v1::Request {
|
|
||||||
room_alias: room_alias.clone(),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.is_ok()
|
|
||||||
{
|
{
|
||||||
room_id = Some(
|
room_id = Some(
|
||||||
services()
|
services()
|
||||||
|
|
|
@ -339,17 +339,19 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
|
|
||||||
let mut failures = BTreeMap::new();
|
let mut failures = BTreeMap::new();
|
||||||
|
|
||||||
let back_off = |id| match services()
|
let back_off = |id| async {
|
||||||
.globals
|
match services()
|
||||||
.bad_query_ratelimiter
|
.globals
|
||||||
.write()
|
.bad_query_ratelimiter
|
||||||
.unwrap()
|
.write()
|
||||||
.entry(id)
|
.await
|
||||||
{
|
.entry(id)
|
||||||
hash_map::Entry::Vacant(e) => {
|
{
|
||||||
e.insert((Instant::now(), 1));
|
hash_map::Entry::Vacant(e) => {
|
||||||
|
e.insert((Instant::now(), 1));
|
||||||
|
}
|
||||||
|
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
||||||
}
|
}
|
||||||
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut futures: FuturesUnordered<_> = get_over_federation
|
let mut futures: FuturesUnordered<_> = get_over_federation
|
||||||
|
@ -359,8 +361,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
.globals
|
.globals
|
||||||
.bad_query_ratelimiter
|
.bad_query_ratelimiter
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.await
|
||||||
.get(&*server)
|
.get(server)
|
||||||
{
|
{
|
||||||
// Exponential backoff
|
// Exponential backoff
|
||||||
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
|
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
|
||||||
|
@ -393,7 +395,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| Error::BadServerResponse("Query took too long")),
|
.map_err(|_e| Error::BadServerResponse("Query took too long")),
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
@ -428,7 +430,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
device_keys.extend(response.device_keys);
|
device_keys.extend(response.device_keys);
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
back_off(server.to_owned());
|
back_off(server.to_owned()).await;
|
||||||
|
|
||||||
failures.insert(server.to_string(), json!({}));
|
failures.insert(server.to_string(), json!({}));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -51,7 +51,7 @@ pub async fn create_content_route(
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(create_content::v3::Response {
|
Ok(create_content::v3::Response {
|
||||||
content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
|
content_uri: mxc.into(),
|
||||||
blurhash: None,
|
blurhash: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -26,9 +26,10 @@ use ruma::{
|
||||||
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
|
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
|
||||||
use std::{
|
use std::{
|
||||||
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
|
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
|
||||||
sync::{Arc, RwLock},
|
sync::Arc,
|
||||||
time::{Duration, Instant},
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
|
use tokio::sync::RwLock;
|
||||||
use tracing::{debug, error, info, warn};
|
use tracing::{debug, error, info, warn};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
|
@ -64,7 +65,12 @@ pub async fn join_room_by_id_route(
|
||||||
.map(|user| user.server_name().to_owned()),
|
.map(|user| user.server_name().to_owned()),
|
||||||
);
|
);
|
||||||
|
|
||||||
servers.push(body.room_id.server_name().to_owned());
|
servers.push(
|
||||||
|
body.room_id
|
||||||
|
.server_name()
|
||||||
|
.expect("Room IDs should always have a server name")
|
||||||
|
.into(),
|
||||||
|
);
|
||||||
|
|
||||||
join_room_by_id_helper(
|
join_room_by_id_helper(
|
||||||
body.sender_user.as_deref(),
|
body.sender_user.as_deref(),
|
||||||
|
@ -105,7 +111,12 @@ pub async fn join_room_by_id_or_alias_route(
|
||||||
.map(|user| user.server_name().to_owned()),
|
.map(|user| user.server_name().to_owned()),
|
||||||
);
|
);
|
||||||
|
|
||||||
servers.push(room_id.server_name().to_owned());
|
servers.push(
|
||||||
|
room_id
|
||||||
|
.server_name()
|
||||||
|
.expect("Room IDs should always have a server name")
|
||||||
|
.into(),
|
||||||
|
);
|
||||||
|
|
||||||
(servers, room_id)
|
(servers, room_id)
|
||||||
}
|
}
|
||||||
|
@ -202,24 +213,28 @@ pub async fn kick_user_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(body.room_id.clone())
|
.entry(body.room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomMember,
|
.timeline
|
||||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
.build_and_append_pdu(
|
||||||
unsigned: None,
|
PduBuilder {
|
||||||
state_key: Some(body.user_id.to_string()),
|
event_type: TimelineEventType::RoomMember,
|
||||||
redacts: None,
|
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some(body.user_id.to_string()),
|
||||||
&body.room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
|
@ -255,6 +270,7 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
|
||||||
serde_json::from_str(event.content.get())
|
serde_json::from_str(event.content.get())
|
||||||
.map(|event: RoomMemberEventContent| RoomMemberEventContent {
|
.map(|event: RoomMemberEventContent| RoomMemberEventContent {
|
||||||
membership: MembershipState::Ban,
|
membership: MembershipState::Ban,
|
||||||
|
join_authorized_via_users_server: None,
|
||||||
..event
|
..event
|
||||||
})
|
})
|
||||||
.map_err(|_| Error::bad_database("Invalid member event in database."))
|
.map_err(|_| Error::bad_database("Invalid member event in database."))
|
||||||
|
@ -266,24 +282,28 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(body.room_id.clone())
|
.entry(body.room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomMember,
|
.timeline
|
||||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
.build_and_append_pdu(
|
||||||
unsigned: None,
|
PduBuilder {
|
||||||
state_key: Some(body.user_id.to_string()),
|
event_type: TimelineEventType::RoomMember,
|
||||||
redacts: None,
|
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some(body.user_id.to_string()),
|
||||||
&body.room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
|
@ -324,24 +344,28 @@ pub async fn unban_user_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(body.room_id.clone())
|
.entry(body.room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomMember,
|
.timeline
|
||||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
.build_and_append_pdu(
|
||||||
unsigned: None,
|
PduBuilder {
|
||||||
state_key: Some(body.user_id.to_string()),
|
event_type: TimelineEventType::RoomMember,
|
||||||
redacts: None,
|
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some(body.user_id.to_string()),
|
||||||
&body.room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
|
@ -400,7 +424,7 @@ pub async fn get_member_events_route(
|
||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -435,7 +459,7 @@ pub async fn joined_members_route(
|
||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -479,7 +503,7 @@ async fn join_room_by_id_helper(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -619,7 +643,7 @@ async fn join_room_by_id_helper(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Ok(signature) = signed_value["signatures"]
|
match signed_value["signatures"]
|
||||||
.as_object()
|
.as_object()
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
|
@ -630,18 +654,20 @@ async fn join_room_by_id_helper(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Server did not send its signature",
|
"Server did not send its signature",
|
||||||
))
|
))
|
||||||
})
|
}) {
|
||||||
{
|
Ok(signature) => {
|
||||||
join_event
|
join_event
|
||||||
.get_mut("signatures")
|
.get_mut("signatures")
|
||||||
.expect("we created a valid pdu")
|
.expect("we created a valid pdu")
|
||||||
.as_object_mut()
|
.as_object_mut()
|
||||||
.expect("we created a valid pdu")
|
.expect("we created a valid pdu")
|
||||||
.insert(remote_server.to_string(), signature.clone());
|
.insert(remote_server.to_string(), signature.clone());
|
||||||
} else {
|
}
|
||||||
warn!(
|
Err(e) => {
|
||||||
"Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}",
|
warn!(
|
||||||
);
|
"Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}",
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -668,7 +694,7 @@ async fn join_room_by_id_helper(
|
||||||
.iter()
|
.iter()
|
||||||
.map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
|
.map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
|
||||||
{
|
{
|
||||||
let (event_id, value) = match result {
|
let (event_id, value) = match result.await {
|
||||||
Ok(t) => t,
|
Ok(t) => t,
|
||||||
Err(_) => continue,
|
Err(_) => continue,
|
||||||
};
|
};
|
||||||
|
@ -698,7 +724,7 @@ async fn join_room_by_id_helper(
|
||||||
.iter()
|
.iter()
|
||||||
.map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
|
.map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
|
||||||
{
|
{
|
||||||
let (event_id, value) = match result {
|
let (event_id, value) = match result.await {
|
||||||
Ok(t) => t,
|
Ok(t) => t,
|
||||||
Err(_) => continue,
|
Err(_) => continue,
|
||||||
};
|
};
|
||||||
|
@ -710,7 +736,7 @@ async fn join_room_by_id_helper(
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("Running send_join auth check");
|
info!("Running send_join auth check");
|
||||||
if !state_res::event_auth::auth_check(
|
let authenticated = state_res::event_auth::auth_check(
|
||||||
&state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
|
&state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
|
||||||
&parsed_join_pdu,
|
&parsed_join_pdu,
|
||||||
None::<PduEvent>, // TODO: third party invite
|
None::<PduEvent>, // TODO: third party invite
|
||||||
|
@ -733,7 +759,9 @@ async fn join_room_by_id_helper(
|
||||||
.map_err(|e| {
|
.map_err(|e| {
|
||||||
warn!("Auth check failed: {e}");
|
warn!("Auth check failed: {e}");
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
|
Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
|
||||||
})? {
|
})?;
|
||||||
|
|
||||||
|
if !authenticated {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Auth check failed",
|
"Auth check failed",
|
||||||
|
@ -770,12 +798,16 @@ async fn join_room_by_id_helper(
|
||||||
let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?;
|
let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?;
|
||||||
|
|
||||||
info!("Appending new room join event");
|
info!("Appending new room join event");
|
||||||
services().rooms.timeline.append_pdu(
|
services()
|
||||||
&parsed_join_pdu,
|
.rooms
|
||||||
join_event,
|
.timeline
|
||||||
vec![(*parsed_join_pdu.event_id).to_owned()],
|
.append_pdu(
|
||||||
&state_lock,
|
&parsed_join_pdu,
|
||||||
)?;
|
join_event,
|
||||||
|
vec![(*parsed_join_pdu.event_id).to_owned()],
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
info!("Setting final room state for new room");
|
info!("Setting final room state for new room");
|
||||||
// We set the room state after inserting the pdu, so that we never have a moment in time
|
// We set the room state after inserting the pdu, so that we never have a moment in time
|
||||||
|
@ -888,18 +920,23 @@ async fn join_room_by_id_helper(
|
||||||
};
|
};
|
||||||
|
|
||||||
// Try normal join first
|
// Try normal join first
|
||||||
let error = match services().rooms.timeline.build_and_append_pdu(
|
let error = match services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomMember,
|
.timeline
|
||||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
.build_and_append_pdu(
|
||||||
unsigned: None,
|
PduBuilder {
|
||||||
state_key: Some(sender_user.to_string()),
|
event_type: TimelineEventType::RoomMember,
|
||||||
redacts: None,
|
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some(sender_user.to_string()),
|
||||||
room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
) {
|
sender_user,
|
||||||
|
room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())),
|
Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())),
|
||||||
Err(e) => e,
|
Err(e) => e,
|
||||||
};
|
};
|
||||||
|
@ -1095,7 +1132,7 @@ async fn make_join_request(
|
||||||
make_join_response_and_server
|
make_join_response_and_server
|
||||||
}
|
}
|
||||||
|
|
||||||
fn validate_and_add_event_id(
|
async fn validate_and_add_event_id(
|
||||||
pdu: &RawJsonValue,
|
pdu: &RawJsonValue,
|
||||||
room_version: &RoomVersionId,
|
room_version: &RoomVersionId,
|
||||||
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
|
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
|
||||||
|
@ -1111,24 +1148,26 @@ fn validate_and_add_event_id(
|
||||||
))
|
))
|
||||||
.expect("ruma's reference hashes are valid event ids");
|
.expect("ruma's reference hashes are valid event ids");
|
||||||
|
|
||||||
let back_off = |id| match services()
|
let back_off = |id| async {
|
||||||
.globals
|
match services()
|
||||||
.bad_event_ratelimiter
|
.globals
|
||||||
.write()
|
.bad_event_ratelimiter
|
||||||
.unwrap()
|
.write()
|
||||||
.entry(id)
|
.await
|
||||||
{
|
.entry(id)
|
||||||
Entry::Vacant(e) => {
|
{
|
||||||
e.insert((Instant::now(), 1));
|
Entry::Vacant(e) => {
|
||||||
|
e.insert((Instant::now(), 1));
|
||||||
|
}
|
||||||
|
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
||||||
}
|
}
|
||||||
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some((time, tries)) = services()
|
if let Some((time, tries)) = services()
|
||||||
.globals
|
.globals
|
||||||
.bad_event_ratelimiter
|
.bad_event_ratelimiter
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.await
|
||||||
.get(&event_id)
|
.get(&event_id)
|
||||||
{
|
{
|
||||||
// Exponential backoff
|
// Exponential backoff
|
||||||
|
@ -1143,15 +1182,10 @@ fn validate_and_add_event_id(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Err(e) = ruma::signatures::verify_event(
|
if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version)
|
||||||
&*pub_key_map
|
{
|
||||||
.read()
|
|
||||||
.map_err(|_| Error::bad_database("RwLock is poisoned."))?,
|
|
||||||
&value,
|
|
||||||
room_version,
|
|
||||||
) {
|
|
||||||
warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
|
warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
|
||||||
back_off(event_id);
|
back_off(event_id).await;
|
||||||
return Err(Error::BadServerResponse("Event failed verification."));
|
return Err(Error::BadServerResponse("Event failed verification."));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1177,7 +1211,7 @@ pub(crate) async fn invite_helper<'a>(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -1298,34 +1332,38 @@ pub(crate) async fn invite_helper<'a>(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomMember,
|
.timeline
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
.build_and_append_pdu(
|
||||||
membership: MembershipState::Invite,
|
PduBuilder {
|
||||||
displayname: services().users.displayname(user_id)?,
|
event_type: TimelineEventType::RoomMember,
|
||||||
avatar_url: services().users.avatar_url(user_id)?,
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
is_direct: Some(is_direct),
|
membership: MembershipState::Invite,
|
||||||
third_party_invite: None,
|
displayname: services().users.displayname(user_id)?,
|
||||||
blurhash: services().users.blurhash(user_id)?,
|
avatar_url: services().users.avatar_url(user_id)?,
|
||||||
reason,
|
is_direct: Some(is_direct),
|
||||||
join_authorized_via_users_server: None,
|
third_party_invite: None,
|
||||||
})
|
blurhash: services().users.blurhash(user_id)?,
|
||||||
.expect("event is valid, we just created it"),
|
reason,
|
||||||
unsigned: None,
|
join_authorized_via_users_server: None,
|
||||||
state_key: Some(user_id.to_string()),
|
})
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some(user_id.to_string()),
|
||||||
room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
|
@ -1362,7 +1400,7 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> {
|
||||||
pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
|
pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
|
||||||
// Ask a remote server if we don't have this room
|
// Ask a remote server if we don't have this room
|
||||||
if !services().rooms.metadata.exists(room_id)?
|
if !services().rooms.metadata.exists(room_id)?
|
||||||
&& room_id.server_name() != services().globals.server_name()
|
&& room_id.server_name() != Some(services().globals.server_name())
|
||||||
{
|
{
|
||||||
if let Err(e) = remote_leave_room(user_id, room_id).await {
|
if let Err(e) = remote_leave_room(user_id, room_id).await {
|
||||||
warn!("Failed to leave room {} remotely: {}", user_id, e);
|
warn!("Failed to leave room {} remotely: {}", user_id, e);
|
||||||
|
@ -1393,7 +1431,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -1428,19 +1466,24 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
|
||||||
|
|
||||||
event.membership = MembershipState::Leave;
|
event.membership = MembershipState::Leave;
|
||||||
event.reason = reason;
|
event.reason = reason;
|
||||||
|
event.join_authorized_via_users_server = None;
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomMember,
|
.timeline
|
||||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
.build_and_append_pdu(
|
||||||
unsigned: None,
|
PduBuilder {
|
||||||
state_key: Some(user_id.to_string()),
|
event_type: TimelineEventType::RoomMember,
|
||||||
redacts: None,
|
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
user_id,
|
state_key: Some(user_id.to_string()),
|
||||||
room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
user_id,
|
||||||
|
room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
|
|
@ -32,7 +32,7 @@ pub async fn send_message_event_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(body.room_id.clone())
|
.entry(body.room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -73,19 +73,23 @@ pub async fn send_message_event_route(
|
||||||
let mut unsigned = BTreeMap::new();
|
let mut unsigned = BTreeMap::new();
|
||||||
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
|
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
|
||||||
|
|
||||||
let event_id = services().rooms.timeline.build_and_append_pdu(
|
let event_id = services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: body.event_type.to_string().into(),
|
.timeline
|
||||||
content: serde_json::from_str(body.body.body.json().get())
|
.build_and_append_pdu(
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
|
PduBuilder {
|
||||||
unsigned: Some(unsigned),
|
event_type: body.event_type.to_string().into(),
|
||||||
state_key: None,
|
content: serde_json::from_str(body.body.body.json().get())
|
||||||
redacts: None,
|
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
|
||||||
},
|
unsigned: Some(unsigned),
|
||||||
sender_user,
|
state_key: None,
|
||||||
&body.room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
services().transaction_ids.add_txnid(
|
services().transaction_ids.add_txnid(
|
||||||
sender_user,
|
sender_user,
|
||||||
|
@ -124,14 +128,13 @@ pub async fn get_message_events_route(
|
||||||
let to = body
|
let to = body
|
||||||
.to
|
.to
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
services()
|
||||||
sender_user,
|
.rooms
|
||||||
sender_device,
|
.lazy_loading
|
||||||
&body.room_id,
|
.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
|
||||||
from,
|
.await?;
|
||||||
)?;
|
|
||||||
|
|
||||||
let limit = u64::from(body.limit).min(100) as usize;
|
let limit = u64::from(body.limit).min(100) as usize;
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,8 @@
|
||||||
use crate::{services, utils, Result, Ruma};
|
use crate::{services, utils, Error, Result, Ruma};
|
||||||
use ruma::api::client::presence::{get_presence, set_presence};
|
use ruma::api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
presence::{get_presence, set_presence},
|
||||||
|
};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/presence/{userId}/status`
|
/// # `PUT /_matrix/client/r0/presence/{userId}/status`
|
||||||
|
@ -79,6 +82,9 @@ pub async fn get_presence_route(
|
||||||
presence: presence.content.presence,
|
presence: presence.content.presence,
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
todo!();
|
Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Presence state for this user was not found",
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,6 +40,7 @@ pub async fn set_displayname_route(
|
||||||
event_type: TimelineEventType::RoomMember,
|
event_type: TimelineEventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
displayname: body.displayname.clone(),
|
displayname: body.displayname.clone(),
|
||||||
|
join_authorized_via_users_server: None,
|
||||||
..serde_json::from_str(
|
..serde_json::from_str(
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -77,18 +78,17 @@ pub async fn set_displayname_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.clone())
|
.entry(room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let _ = services().rooms.timeline.build_and_append_pdu(
|
let _ = services()
|
||||||
pdu_builder,
|
.rooms
|
||||||
sender_user,
|
.timeline
|
||||||
&room_id,
|
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
|
||||||
&state_lock,
|
.await;
|
||||||
);
|
|
||||||
|
|
||||||
// Presence update
|
// Presence update
|
||||||
services().rooms.edus.presence.update_presence(
|
services().rooms.edus.presence.update_presence(
|
||||||
|
@ -175,6 +175,7 @@ pub async fn set_avatar_url_route(
|
||||||
event_type: TimelineEventType::RoomMember,
|
event_type: TimelineEventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
avatar_url: body.avatar_url.clone(),
|
avatar_url: body.avatar_url.clone(),
|
||||||
|
join_authorized_via_users_server: None,
|
||||||
..serde_json::from_str(
|
..serde_json::from_str(
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -212,18 +213,17 @@ pub async fn set_avatar_url_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.clone())
|
.entry(room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let _ = services().rooms.timeline.build_and_append_pdu(
|
let _ = services()
|
||||||
pdu_builder,
|
.rooms
|
||||||
sender_user,
|
.timeline
|
||||||
&room_id,
|
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
|
||||||
&state_lock,
|
.await;
|
||||||
);
|
|
||||||
|
|
||||||
// Presence update
|
// Presence update
|
||||||
services().rooms.edus.presence.update_presence(
|
services().rooms.edus.presence.update_presence(
|
||||||
|
|
|
@ -24,28 +24,32 @@ pub async fn redact_event_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(body.room_id.clone())
|
.entry(body.room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let event_id = services().rooms.timeline.build_and_append_pdu(
|
let event_id = services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomRedaction,
|
.timeline
|
||||||
content: to_raw_value(&RoomRedactionEventContent {
|
.build_and_append_pdu(
|
||||||
redacts: Some(body.event_id.clone()),
|
PduBuilder {
|
||||||
reason: body.reason.clone(),
|
event_type: TimelineEventType::RoomRedaction,
|
||||||
})
|
content: to_raw_value(&RoomRedactionEventContent {
|
||||||
.expect("event is valid, we just created it"),
|
redacts: Some(body.event_id.clone()),
|
||||||
unsigned: None,
|
reason: body.reason.clone(),
|
||||||
state_key: None,
|
})
|
||||||
redacts: Some(body.event_id.into()),
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: None,
|
||||||
&body.room_id,
|
redacts: Some(body.event_id.into()),
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
|
|
|
@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
||||||
let to = body
|
let to = body
|
||||||
.to
|
.to
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
// Use limit or else 10, with maximum 100
|
||||||
let limit = body
|
let limit = body
|
||||||
|
@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route(
|
||||||
let to = body
|
let to = body
|
||||||
.to
|
.to
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
// Use limit or else 10, with maximum 100
|
||||||
let limit = body
|
let limit = body
|
||||||
|
@ -121,7 +121,7 @@ pub async fn get_relating_events_route(
|
||||||
let to = body
|
let to = body
|
||||||
.to
|
.to
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|t| PduCount::try_from_string(&t).ok());
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
// Use limit or else 10, with maximum 100
|
||||||
let limit = body
|
let limit = body
|
||||||
|
|
|
@ -61,7 +61,7 @@ pub async fn create_room_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.clone())
|
.entry(room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -204,42 +204,50 @@ pub async fn create_room_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// 1. The room create event
|
// 1. The room create event
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomCreate,
|
.timeline
|
||||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
.build_and_append_pdu(
|
||||||
unsigned: None,
|
PduBuilder {
|
||||||
state_key: Some("".to_owned()),
|
event_type: TimelineEventType::RoomCreate,
|
||||||
redacts: None,
|
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 2. Let the room creator join
|
// 2. Let the room creator join
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomMember,
|
.timeline
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
.build_and_append_pdu(
|
||||||
membership: MembershipState::Join,
|
PduBuilder {
|
||||||
displayname: services().users.displayname(sender_user)?,
|
event_type: TimelineEventType::RoomMember,
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
is_direct: Some(body.is_direct),
|
membership: MembershipState::Join,
|
||||||
third_party_invite: None,
|
displayname: services().users.displayname(sender_user)?,
|
||||||
blurhash: services().users.blurhash(sender_user)?,
|
avatar_url: services().users.avatar_url(sender_user)?,
|
||||||
reason: None,
|
is_direct: Some(body.is_direct),
|
||||||
join_authorized_via_users_server: None,
|
third_party_invite: None,
|
||||||
})
|
blurhash: services().users.blurhash(sender_user)?,
|
||||||
.expect("event is valid, we just created it"),
|
reason: None,
|
||||||
unsigned: None,
|
join_authorized_via_users_server: None,
|
||||||
state_key: Some(sender_user.to_string()),
|
})
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some(sender_user.to_string()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 3. Power levels
|
// 3. Power levels
|
||||||
|
|
||||||
|
@ -276,30 +284,14 @@ pub async fn create_room_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomPowerLevels,
|
.timeline
|
||||||
content: to_raw_value(&power_levels_content)
|
.build_and_append_pdu(
|
||||||
.expect("to_raw_value always works on serde_json::Value"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// 4. Canonical room alias
|
|
||||||
if let Some(room_alias_id) = &alias {
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomCanonicalAlias,
|
event_type: TimelineEventType::RoomPowerLevels,
|
||||||
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
content: to_raw_value(&power_levels_content)
|
||||||
alias: Some(room_alias_id.to_owned()),
|
.expect("to_raw_value always works on serde_json::Value"),
|
||||||
alt_aliases: vec![],
|
|
||||||
})
|
|
||||||
.expect("We checked that alias earlier, it must be fine"),
|
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
|
@ -307,64 +299,100 @@ pub async fn create_room_route(
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// 4. Canonical room alias
|
||||||
|
if let Some(room_alias_id) = &alias {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: TimelineEventType::RoomCanonicalAlias,
|
||||||
|
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
||||||
|
alias: Some(room_alias_id.to_owned()),
|
||||||
|
alt_aliases: vec![],
|
||||||
|
})
|
||||||
|
.expect("We checked that alias earlier, it must be fine"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some("".to_owned()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 5. Events set by preset
|
// 5. Events set by preset
|
||||||
|
|
||||||
// 5.1 Join Rules
|
// 5.1 Join Rules
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomJoinRules,
|
.timeline
|
||||||
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
.build_and_append_pdu(
|
||||||
RoomPreset::PublicChat => JoinRule::Public,
|
PduBuilder {
|
||||||
// according to spec "invite" is the default
|
event_type: TimelineEventType::RoomJoinRules,
|
||||||
_ => JoinRule::Invite,
|
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
||||||
}))
|
RoomPreset::PublicChat => JoinRule::Public,
|
||||||
.expect("event is valid, we just created it"),
|
// according to spec "invite" is the default
|
||||||
unsigned: None,
|
_ => JoinRule::Invite,
|
||||||
state_key: Some("".to_owned()),
|
}))
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 5.2 History Visibility
|
// 5.2 History Visibility
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomHistoryVisibility,
|
.timeline
|
||||||
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
.build_and_append_pdu(
|
||||||
HistoryVisibility::Shared,
|
PduBuilder {
|
||||||
))
|
event_type: TimelineEventType::RoomHistoryVisibility,
|
||||||
.expect("event is valid, we just created it"),
|
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
||||||
unsigned: None,
|
HistoryVisibility::Shared,
|
||||||
state_key: Some("".to_owned()),
|
))
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 5.3 Guest Access
|
// 5.3 Guest Access
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomGuestAccess,
|
.timeline
|
||||||
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
.build_and_append_pdu(
|
||||||
RoomPreset::PublicChat => GuestAccess::Forbidden,
|
PduBuilder {
|
||||||
_ => GuestAccess::CanJoin,
|
event_type: TimelineEventType::RoomGuestAccess,
|
||||||
}))
|
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
||||||
.expect("event is valid, we just created it"),
|
RoomPreset::PublicChat => GuestAccess::Forbidden,
|
||||||
unsigned: None,
|
_ => GuestAccess::CanJoin,
|
||||||
state_key: Some("".to_owned()),
|
}))
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 6. Events listed in initial_state
|
// 6. Events listed in initial_state
|
||||||
for event in &body.initial_state {
|
for event in &body.initial_state {
|
||||||
|
@ -383,47 +411,54 @@ pub async fn create_room_route(
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
pdu_builder,
|
.rooms
|
||||||
sender_user,
|
.timeline
|
||||||
&room_id,
|
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
|
||||||
&state_lock,
|
.await?;
|
||||||
)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// 7. Events implied by name and topic
|
// 7. Events implied by name and topic
|
||||||
if let Some(name) = &body.name {
|
if let Some(name) = &body.name {
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomName,
|
.timeline
|
||||||
content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
|
.build_and_append_pdu(
|
||||||
.expect("event is valid, we just created it"),
|
PduBuilder {
|
||||||
unsigned: None,
|
event_type: TimelineEventType::RoomName,
|
||||||
state_key: Some("".to_owned()),
|
content: to_raw_value(&RoomNameEventContent::new(name.clone()))
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(topic) = &body.topic {
|
if let Some(topic) = &body.topic {
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomTopic,
|
.timeline
|
||||||
content: to_raw_value(&RoomTopicEventContent {
|
.build_and_append_pdu(
|
||||||
topic: topic.clone(),
|
PduBuilder {
|
||||||
})
|
event_type: TimelineEventType::RoomTopic,
|
||||||
.expect("event is valid, we just created it"),
|
content: to_raw_value(&RoomTopicEventContent {
|
||||||
unsigned: None,
|
topic: topic.clone(),
|
||||||
state_key: Some("".to_owned()),
|
})
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 8. Events implied by invite (and TODO: invite_3pid)
|
// 8. Events implied by invite (and TODO: invite_3pid)
|
||||||
|
@ -553,7 +588,7 @@ pub async fn upgrade_room_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(body.room_id.clone())
|
.entry(body.room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -561,22 +596,26 @@ pub async fn upgrade_room_route(
|
||||||
|
|
||||||
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
||||||
// Fail if the sender does not have the required permissions
|
// Fail if the sender does not have the required permissions
|
||||||
let tombstone_event_id = services().rooms.timeline.build_and_append_pdu(
|
let tombstone_event_id = services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomTombstone,
|
.timeline
|
||||||
content: to_raw_value(&RoomTombstoneEventContent {
|
.build_and_append_pdu(
|
||||||
body: "This room has been replaced".to_owned(),
|
PduBuilder {
|
||||||
replacement_room: replacement_room.clone(),
|
event_type: TimelineEventType::RoomTombstone,
|
||||||
})
|
content: to_raw_value(&RoomTombstoneEventContent {
|
||||||
.expect("event is valid, we just created it"),
|
body: "This room has been replaced".to_owned(),
|
||||||
unsigned: None,
|
replacement_room: replacement_room.clone(),
|
||||||
state_key: Some("".to_owned()),
|
})
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&body.room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// Change lock to replacement room
|
// Change lock to replacement room
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
@ -585,7 +624,7 @@ pub async fn upgrade_room_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(replacement_room.clone())
|
.entry(replacement_room.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -661,43 +700,51 @@ pub async fn upgrade_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomCreate,
|
.timeline
|
||||||
content: to_raw_value(&create_event_content)
|
.build_and_append_pdu(
|
||||||
.expect("event is valid, we just created it"),
|
PduBuilder {
|
||||||
unsigned: None,
|
event_type: TimelineEventType::RoomCreate,
|
||||||
state_key: Some("".to_owned()),
|
content: to_raw_value(&create_event_content)
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&replacement_room,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&replacement_room,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// Join the new room
|
// Join the new room
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomMember,
|
.timeline
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
.build_and_append_pdu(
|
||||||
membership: MembershipState::Join,
|
PduBuilder {
|
||||||
displayname: services().users.displayname(sender_user)?,
|
event_type: TimelineEventType::RoomMember,
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
is_direct: None,
|
membership: MembershipState::Join,
|
||||||
third_party_invite: None,
|
displayname: services().users.displayname(sender_user)?,
|
||||||
blurhash: services().users.blurhash(sender_user)?,
|
avatar_url: services().users.avatar_url(sender_user)?,
|
||||||
reason: None,
|
is_direct: None,
|
||||||
join_authorized_via_users_server: None,
|
third_party_invite: None,
|
||||||
})
|
blurhash: services().users.blurhash(sender_user)?,
|
||||||
.expect("event is valid, we just created it"),
|
reason: None,
|
||||||
unsigned: None,
|
join_authorized_via_users_server: None,
|
||||||
state_key: Some(sender_user.to_string()),
|
})
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some(sender_user.to_string()),
|
||||||
&replacement_room,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&replacement_room,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// Recommended transferable state events list from the specs
|
// Recommended transferable state events list from the specs
|
||||||
let transferable_state_events = vec![
|
let transferable_state_events = vec![
|
||||||
|
@ -724,18 +771,22 @@ pub async fn upgrade_room_route(
|
||||||
None => continue, // Skipping missing events.
|
None => continue, // Skipping missing events.
|
||||||
};
|
};
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: event_type.to_string().into(),
|
.timeline
|
||||||
content: event_content,
|
.build_and_append_pdu(
|
||||||
unsigned: None,
|
PduBuilder {
|
||||||
state_key: Some("".to_owned()),
|
event_type: event_type.to_string().into(),
|
||||||
redacts: None,
|
content: event_content,
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&replacement_room,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&replacement_room,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Moves any local aliases to the new room
|
// Moves any local aliases to the new room
|
||||||
|
@ -769,19 +820,23 @@ pub async fn upgrade_room_route(
|
||||||
power_levels_event_content.invite = new_level;
|
power_levels_event_content.invite = new_level;
|
||||||
|
|
||||||
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
||||||
let _ = services().rooms.timeline.build_and_append_pdu(
|
let _ = services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomPowerLevels,
|
.timeline
|
||||||
content: to_raw_value(&power_levels_event_content)
|
.build_and_append_pdu(
|
||||||
.expect("event is valid, we just created it"),
|
PduBuilder {
|
||||||
unsigned: None,
|
event_type: TimelineEventType::RoomPowerLevels,
|
||||||
state_key: Some("".to_owned()),
|
content: to_raw_value(&power_levels_event_content)
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some("".to_owned()),
|
||||||
&body.room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
|
|
|
@ -42,24 +42,31 @@ pub async fn get_login_types_route(
|
||||||
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
||||||
/// supported login types.
|
/// supported login types.
|
||||||
pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
|
pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
|
||||||
|
// To allow deprecated login methods
|
||||||
|
#![allow(deprecated)]
|
||||||
// Validate login method
|
// Validate login method
|
||||||
// TODO: Other login methods
|
// TODO: Other login methods
|
||||||
let user_id = match &body.login_info {
|
let user_id = match &body.login_info {
|
||||||
login::v3::LoginInfo::Password(login::v3::Password {
|
login::v3::LoginInfo::Password(login::v3::Password {
|
||||||
identifier,
|
identifier,
|
||||||
password,
|
password,
|
||||||
|
user,
|
||||||
|
address: _,
|
||||||
|
medium: _,
|
||||||
}) => {
|
}) => {
|
||||||
let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
|
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
||||||
user_id.to_lowercase()
|
UserId::parse_with_server_name(
|
||||||
|
user_id.to_lowercase(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
|
} else if let Some(user) = user {
|
||||||
|
UserId::parse(user)
|
||||||
} else {
|
} else {
|
||||||
warn!("Bad login type: {:?}", &body.login_info);
|
warn!("Bad login type: {:?}", &body.login_info);
|
||||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||||
};
|
}
|
||||||
let user_id =
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
|
||||||
UserId::parse_with_server_name(username, services().globals.server_name())
|
|
||||||
.map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
|
||||||
})?;
|
|
||||||
let hash = services()
|
let hash = services()
|
||||||
.users
|
.users
|
||||||
.password_hash(&user_id)?
|
.password_hash(&user_id)?
|
||||||
|
@ -105,24 +112,28 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { identifier }) => {
|
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
|
||||||
|
identifier,
|
||||||
|
user,
|
||||||
|
}) => {
|
||||||
if !body.from_appservice {
|
if !body.from_appservice {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::MissingToken,
|
||||||
"Forbidden login type.",
|
"Missing appservice token.",
|
||||||
));
|
));
|
||||||
};
|
};
|
||||||
let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
|
if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
||||||
user_id.to_lowercase()
|
UserId::parse_with_server_name(
|
||||||
|
user_id.to_lowercase(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
|
} else if let Some(user) = user {
|
||||||
|
UserId::parse(user)
|
||||||
} else {
|
} else {
|
||||||
|
warn!("Bad login type: {:?}", &body.login_info);
|
||||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||||
};
|
}
|
||||||
let user_id =
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?
|
||||||
UserId::parse_with_server_name(username, services().globals.server_name())
|
|
||||||
.map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
|
||||||
})?;
|
|
||||||
user_id
|
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
warn!("Unsupported or unknown login type: {:?}", &body.login_info);
|
warn!("Unsupported or unknown login type: {:?}", &body.login_info);
|
||||||
|
@ -163,6 +174,8 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
||||||
|
|
||||||
info!("{} logged in", user_id);
|
info!("{} logged in", user_id);
|
||||||
|
|
||||||
|
// Homeservers are still required to send the `home_server` field
|
||||||
|
#[allow(deprecated)]
|
||||||
Ok(login::v3::Response {
|
Ok(login::v3::Response {
|
||||||
user_id,
|
user_id,
|
||||||
access_token: token,
|
access_token: token,
|
||||||
|
|
|
@ -85,7 +85,7 @@ pub async fn get_state_events_route(
|
||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -118,7 +118,7 @@ pub async fn get_state_events_for_key_route(
|
||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -157,7 +157,7 @@ pub async fn get_state_events_for_empty_key_route(
|
||||||
if !services()
|
if !services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_state_events(&sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -227,24 +227,28 @@ async fn send_state_event_for_key_helper(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let event_id = services().rooms.timeline.build_and_append_pdu(
|
let event_id = services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: event_type.to_string().into(),
|
.timeline
|
||||||
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
.build_and_append_pdu(
|
||||||
unsigned: None,
|
PduBuilder {
|
||||||
state_key: Some(state_key),
|
event_type: event_type.to_string().into(),
|
||||||
redacts: None,
|
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
||||||
},
|
unsigned: None,
|
||||||
sender_user,
|
state_key: Some(state_key),
|
||||||
room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
sender_user,
|
||||||
|
room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
Ok(event_id)
|
Ok(event_id)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
use crate::{
|
use crate::{
|
||||||
service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse,
|
service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse,
|
||||||
};
|
};
|
||||||
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
filter::{FilterDefinition, LazyLoadOptions},
|
filter::{FilterDefinition, LazyLoadOptions},
|
||||||
|
@ -20,7 +21,7 @@ use ruma::{
|
||||||
StateEventType, TimelineEventType,
|
StateEventType, TimelineEventType,
|
||||||
},
|
},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
|
uint, DeviceId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
|
||||||
};
|
};
|
||||||
use std::{
|
use std::{
|
||||||
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
|
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
|
||||||
|
@ -28,7 +29,7 @@ use std::{
|
||||||
time::Duration,
|
time::Duration,
|
||||||
};
|
};
|
||||||
use tokio::sync::watch::Sender;
|
use tokio::sync::watch::Sender;
|
||||||
use tracing::error;
|
use tracing::{error, info};
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/sync`
|
/// # `GET /_matrix/client/r0/sync`
|
||||||
///
|
///
|
||||||
|
@ -75,7 +76,7 @@ pub async fn sync_events_route(
|
||||||
.globals
|
.globals
|
||||||
.sync_receivers
|
.sync_receivers
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry((sender_user.clone(), sender_device.clone()))
|
.entry((sender_user.clone(), sender_device.clone()))
|
||||||
{
|
{
|
||||||
Entry::Vacant(v) => {
|
Entry::Vacant(v) => {
|
||||||
|
@ -98,6 +99,8 @@ pub async fn sync_events_route(
|
||||||
|
|
||||||
o.insert((body.since.clone(), rx.clone()));
|
o.insert((body.since.clone(), rx.clone()));
|
||||||
|
|
||||||
|
info!("Sync started for {sender_user}");
|
||||||
|
|
||||||
tokio::spawn(sync_helper_wrapper(
|
tokio::spawn(sync_helper_wrapper(
|
||||||
sender_user.clone(),
|
sender_user.clone(),
|
||||||
sender_device.clone(),
|
sender_device.clone(),
|
||||||
|
@ -147,7 +150,7 @@ async fn sync_helper_wrapper(
|
||||||
.globals
|
.globals
|
||||||
.sync_receivers
|
.sync_receivers
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry((sender_user, sender_device))
|
.entry((sender_user, sender_device))
|
||||||
{
|
{
|
||||||
Entry::Occupied(o) => {
|
Entry::Occupied(o) => {
|
||||||
|
@ -302,11 +305,11 @@ async fn sync_helper(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_insert
|
.roomid_mutex_insert
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.clone())
|
.entry(room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let insert_lock = mutex_insert.lock().unwrap();
|
let insert_lock = mutex_insert.lock().await;
|
||||||
drop(insert_lock);
|
drop(insert_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -434,11 +437,11 @@ async fn sync_helper(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_insert
|
.roomid_mutex_insert
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.clone())
|
.entry(room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let insert_lock = mutex_insert.lock().unwrap();
|
let insert_lock = mutex_insert.lock().await;
|
||||||
drop(insert_lock);
|
drop(insert_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -554,6 +557,7 @@ async fn sync_helper(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
async fn load_joined_room(
|
async fn load_joined_room(
|
||||||
sender_user: &UserId,
|
sender_user: &UserId,
|
||||||
sender_device: &DeviceId,
|
sender_device: &DeviceId,
|
||||||
|
@ -576,11 +580,11 @@ async fn load_joined_room(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_insert
|
.roomid_mutex_insert
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
let insert_lock = mutex_insert.lock().unwrap();
|
let insert_lock = mutex_insert.lock().await;
|
||||||
drop(insert_lock);
|
drop(insert_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -590,7 +594,7 @@ async fn load_joined_room(
|
||||||
|| services()
|
|| services()
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
.user
|
||||||
.last_notification_read(&sender_user, &room_id)?
|
.last_notification_read(sender_user, room_id)?
|
||||||
> since;
|
> since;
|
||||||
|
|
||||||
let mut timeline_users = HashSet::new();
|
let mut timeline_users = HashSet::new();
|
||||||
|
@ -598,17 +602,16 @@ async fn load_joined_room(
|
||||||
timeline_users.insert(event.sender.as_str().to_owned());
|
timeline_users.insert(event.sender.as_str().to_owned());
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
services()
|
||||||
&sender_user,
|
.rooms
|
||||||
&sender_device,
|
.lazy_loading
|
||||||
&room_id,
|
.lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount)
|
||||||
sincecount,
|
.await?;
|
||||||
)?;
|
|
||||||
|
|
||||||
// Database queries:
|
// Database queries:
|
||||||
|
|
||||||
let current_shortstatehash =
|
let current_shortstatehash =
|
||||||
if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
|
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
|
||||||
s
|
s
|
||||||
} else {
|
} else {
|
||||||
error!("Room {} has no state", room_id);
|
error!("Room {} has no state", room_id);
|
||||||
|
@ -618,7 +621,7 @@ async fn load_joined_room(
|
||||||
let since_shortstatehash = services()
|
let since_shortstatehash = services()
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
.user
|
||||||
.get_token_shortstatehash(&room_id, since)?;
|
.get_token_shortstatehash(room_id, since)?;
|
||||||
|
|
||||||
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
|
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
|
||||||
if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
|
if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
|
||||||
|
@ -630,12 +633,12 @@ async fn load_joined_room(
|
||||||
let joined_member_count = services()
|
let joined_member_count = services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_joined_count(&room_id)?
|
.room_joined_count(room_id)?
|
||||||
.unwrap_or(0);
|
.unwrap_or(0);
|
||||||
let invited_member_count = services()
|
let invited_member_count = services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_invited_count(&room_id)?
|
.room_invited_count(room_id)?
|
||||||
.unwrap_or(0);
|
.unwrap_or(0);
|
||||||
|
|
||||||
// Recalculate heroes (first 5 members)
|
// Recalculate heroes (first 5 members)
|
||||||
|
@ -648,7 +651,7 @@ async fn load_joined_room(
|
||||||
for hero in services()
|
for hero in services()
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.timeline
|
||||||
.all_pdus(&sender_user, &room_id)?
|
.all_pdus(sender_user, room_id)?
|
||||||
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
|
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
|
||||||
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
|
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
|
||||||
.map(|(_, pdu)| {
|
.map(|(_, pdu)| {
|
||||||
|
@ -669,11 +672,11 @@ async fn load_joined_room(
|
||||||
) && (services()
|
) && (services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.is_joined(&user_id, &room_id)?
|
.is_joined(&user_id, room_id)?
|
||||||
|| services()
|
|| services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.is_invited(&user_id, &room_id)?)
|
.is_invited(&user_id, room_id)?)
|
||||||
{
|
{
|
||||||
Ok::<_, Error>(Some(state_key.clone()))
|
Ok::<_, Error>(Some(state_key.clone()))
|
||||||
} else {
|
} else {
|
||||||
|
@ -789,20 +792,24 @@ async fn load_joined_room(
|
||||||
|
|
||||||
// Reset lazy loading because this is an initial sync
|
// Reset lazy loading because this is an initial sync
|
||||||
services().rooms.lazy_loading.lazy_load_reset(
|
services().rooms.lazy_loading.lazy_load_reset(
|
||||||
&sender_user,
|
sender_user,
|
||||||
&sender_device,
|
sender_device,
|
||||||
&room_id,
|
room_id,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// The state_events above should contain all timeline_users, let's mark them as lazy
|
// The state_events above should contain all timeline_users, let's mark them as lazy
|
||||||
// loaded.
|
// loaded.
|
||||||
services().rooms.lazy_loading.lazy_load_mark_sent(
|
services()
|
||||||
&sender_user,
|
.rooms
|
||||||
&sender_device,
|
.lazy_loading
|
||||||
&room_id,
|
.lazy_load_mark_sent(
|
||||||
lazy_loaded,
|
sender_user,
|
||||||
next_batchcount,
|
sender_device,
|
||||||
);
|
room_id,
|
||||||
|
lazy_loaded,
|
||||||
|
next_batchcount,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
(
|
(
|
||||||
heroes,
|
heroes,
|
||||||
|
@ -866,14 +873,14 @@ async fn load_joined_room(
|
||||||
}
|
}
|
||||||
|
|
||||||
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
&sender_user,
|
sender_user,
|
||||||
&sender_device,
|
sender_device,
|
||||||
&room_id,
|
room_id,
|
||||||
&event.sender,
|
&event.sender,
|
||||||
)? || lazy_load_send_redundant
|
)? || lazy_load_send_redundant
|
||||||
{
|
{
|
||||||
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
|
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
|
||||||
&room_id,
|
room_id,
|
||||||
&StateEventType::RoomMember,
|
&StateEventType::RoomMember,
|
||||||
event.sender.as_str(),
|
event.sender.as_str(),
|
||||||
)? {
|
)? {
|
||||||
|
@ -883,13 +890,17 @@ async fn load_joined_room(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.lazy_loading.lazy_load_mark_sent(
|
services()
|
||||||
&sender_user,
|
.rooms
|
||||||
&sender_device,
|
.lazy_loading
|
||||||
&room_id,
|
.lazy_load_mark_sent(
|
||||||
lazy_loaded,
|
sender_user,
|
||||||
next_batchcount,
|
sender_device,
|
||||||
);
|
room_id,
|
||||||
|
lazy_loaded,
|
||||||
|
next_batchcount,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
let encrypted_room = services()
|
let encrypted_room = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -934,7 +945,7 @@ async fn load_joined_room(
|
||||||
match new_membership {
|
match new_membership {
|
||||||
MembershipState::Join => {
|
MembershipState::Join => {
|
||||||
// A new user joined an encrypted room
|
// A new user joined an encrypted room
|
||||||
if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
|
if !share_encrypted_room(sender_user, &user_id, room_id)? {
|
||||||
device_list_updates.insert(user_id);
|
device_list_updates.insert(user_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -954,15 +965,15 @@ async fn load_joined_room(
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_members(&room_id)
|
.room_members(room_id)
|
||||||
.flatten()
|
.flatten()
|
||||||
.filter(|user_id| {
|
.filter(|user_id| {
|
||||||
// Don't send key updates from the sender to the sender
|
// Don't send key updates from the sender to the sender
|
||||||
&sender_user != user_id
|
sender_user != user_id
|
||||||
})
|
})
|
||||||
.filter(|user_id| {
|
.filter(|user_id| {
|
||||||
// Only send keys if the sender doesn't share an encrypted room with the target already
|
// Only send keys if the sender doesn't share an encrypted room with the target already
|
||||||
!share_encrypted_room(&sender_user, user_id, &room_id)
|
!share_encrypted_room(sender_user, user_id, room_id)
|
||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}),
|
}),
|
||||||
);
|
);
|
||||||
|
@ -997,7 +1008,7 @@ async fn load_joined_room(
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
.user
|
||||||
.notification_count(&sender_user, &room_id)?
|
.notification_count(sender_user, room_id)?
|
||||||
.try_into()
|
.try_into()
|
||||||
.expect("notification count can't go that high"),
|
.expect("notification count can't go that high"),
|
||||||
)
|
)
|
||||||
|
@ -1010,7 +1021,7 @@ async fn load_joined_room(
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
.user
|
||||||
.highlight_count(&sender_user, &room_id)?
|
.highlight_count(sender_user, room_id)?
|
||||||
.try_into()
|
.try_into()
|
||||||
.expect("highlight count can't go that high"),
|
.expect("highlight count can't go that high"),
|
||||||
)
|
)
|
||||||
|
@ -1039,15 +1050,22 @@ async fn load_joined_room(
|
||||||
.rooms
|
.rooms
|
||||||
.edus
|
.edus
|
||||||
.read_receipt
|
.read_receipt
|
||||||
.readreceipts_since(&room_id, since)
|
.readreceipts_since(room_id, since)
|
||||||
.filter_map(|r| r.ok()) // Filter out buggy events
|
.filter_map(|r| r.ok()) // Filter out buggy events
|
||||||
.map(|(_, _, v)| v)
|
.map(|(_, _, v)| v)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
|
if services()
|
||||||
|
.rooms
|
||||||
|
.edus
|
||||||
|
.typing
|
||||||
|
.last_typing_update(room_id)
|
||||||
|
.await?
|
||||||
|
> since
|
||||||
|
{
|
||||||
edus.push(
|
edus.push(
|
||||||
serde_json::from_str(
|
serde_json::from_str(
|
||||||
&serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?)
|
&serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id).await?)
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
)
|
)
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
|
@ -1056,7 +1074,7 @@ async fn load_joined_room(
|
||||||
|
|
||||||
// Save the state after this sync so we can send the correct state diff next sync
|
// Save the state after this sync so we can send the correct state diff next sync
|
||||||
services().rooms.user.associate_token_shortstatehash(
|
services().rooms.user.associate_token_shortstatehash(
|
||||||
&room_id,
|
room_id,
|
||||||
next_batch,
|
next_batch,
|
||||||
current_shortstatehash,
|
current_shortstatehash,
|
||||||
)?;
|
)?;
|
||||||
|
@ -1065,7 +1083,7 @@ async fn load_joined_room(
|
||||||
account_data: RoomAccountData {
|
account_data: RoomAccountData {
|
||||||
events: services()
|
events: services()
|
||||||
.account_data
|
.account_data
|
||||||
.changes_since(Some(&room_id), &sender_user, since)?
|
.changes_since(Some(room_id), sender_user, since)?
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.filter_map(|(_, v)| {
|
.filter_map(|(_, v)| {
|
||||||
serde_json::from_str(v.json().get())
|
serde_json::from_str(v.json().get())
|
||||||
|
@ -1110,13 +1128,13 @@ fn load_timeline(
|
||||||
if services()
|
if services()
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.timeline
|
||||||
.last_timeline_count(&sender_user, &room_id)?
|
.last_timeline_count(sender_user, room_id)?
|
||||||
> roomsincecount
|
> roomsincecount
|
||||||
{
|
{
|
||||||
let mut non_timeline_pdus = services()
|
let mut non_timeline_pdus = services()
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.timeline
|
||||||
.pdus_until(&sender_user, &room_id, PduCount::max())?
|
.pdus_until(sender_user, room_id, PduCount::max())?
|
||||||
.filter_map(|r| {
|
.filter_map(|r| {
|
||||||
// Filter out buggy events
|
// Filter out buggy events
|
||||||
if r.is_err() {
|
if r.is_err() {
|
||||||
|
@ -1172,7 +1190,6 @@ fn share_encrypted_room(
|
||||||
pub async fn sync_events_v4_route(
|
pub async fn sync_events_v4_route(
|
||||||
body: Ruma<sync_events::v4::Request>,
|
body: Ruma<sync_events::v4::Request>,
|
||||||
) -> Result<sync_events::v4::Response, RumaResponse<UiaaResponse>> {
|
) -> Result<sync_events::v4::Response, RumaResponse<UiaaResponse>> {
|
||||||
dbg!(&body.body);
|
|
||||||
let sender_user = body.sender_user.expect("user is authenticated");
|
let sender_user = body.sender_user.expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.expect("user is authenticated");
|
let sender_device = body.sender_device.expect("user is authenticated");
|
||||||
let mut body = body.body;
|
let mut body = body.body;
|
||||||
|
@ -1232,7 +1249,7 @@ pub async fn sync_events_v4_route(
|
||||||
|
|
||||||
for room_id in &all_joined_rooms {
|
for room_id in &all_joined_rooms {
|
||||||
let current_shortstatehash =
|
let current_shortstatehash =
|
||||||
if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
|
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
|
||||||
s
|
s
|
||||||
} else {
|
} else {
|
||||||
error!("Room {} has no state", room_id);
|
error!("Room {} has no state", room_id);
|
||||||
|
@ -1242,7 +1259,7 @@ pub async fn sync_events_v4_route(
|
||||||
let since_shortstatehash = services()
|
let since_shortstatehash = services()
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
.user
|
||||||
.get_token_shortstatehash(&room_id, globalsince)?;
|
.get_token_shortstatehash(room_id, globalsince)?;
|
||||||
|
|
||||||
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
|
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
|
||||||
.and_then(|shortstatehash| {
|
.and_then(|shortstatehash| {
|
||||||
|
@ -1331,7 +1348,7 @@ pub async fn sync_events_v4_route(
|
||||||
if !share_encrypted_room(
|
if !share_encrypted_room(
|
||||||
&sender_user,
|
&sender_user,
|
||||||
&user_id,
|
&user_id,
|
||||||
&room_id,
|
room_id,
|
||||||
)? {
|
)? {
|
||||||
device_list_changes.insert(user_id);
|
device_list_changes.insert(user_id);
|
||||||
}
|
}
|
||||||
|
@ -1352,7 +1369,7 @@ pub async fn sync_events_v4_route(
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_members(&room_id)
|
.room_members(room_id)
|
||||||
.flatten()
|
.flatten()
|
||||||
.filter(|user_id| {
|
.filter(|user_id| {
|
||||||
// Don't send key updates from the sender to the sender
|
// Don't send key updates from the sender to the sender
|
||||||
|
@ -1360,7 +1377,7 @@ pub async fn sync_events_v4_route(
|
||||||
})
|
})
|
||||||
.filter(|user_id| {
|
.filter(|user_id| {
|
||||||
// Only send keys if the sender doesn't share an encrypted room with the target already
|
// Only send keys if the sender doesn't share an encrypted room with the target already
|
||||||
!share_encrypted_room(&sender_user, user_id, &room_id)
|
!share_encrypted_room(&sender_user, user_id, room_id)
|
||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}),
|
}),
|
||||||
);
|
);
|
||||||
|
@ -1451,7 +1468,7 @@ pub async fn sync_events_v4_route(
|
||||||
}
|
}
|
||||||
sync_events::v4::SyncOp {
|
sync_events::v4::SyncOp {
|
||||||
op: SlidingOp::Sync,
|
op: SlidingOp::Sync,
|
||||||
range: Some(r.clone()),
|
range: Some(r),
|
||||||
index: None,
|
index: None,
|
||||||
room_ids,
|
room_ids,
|
||||||
room_id: None,
|
room_id: None,
|
||||||
|
@ -1476,6 +1493,9 @@ pub async fn sync_events_v4_route(
|
||||||
|
|
||||||
let mut known_subscription_rooms = BTreeSet::new();
|
let mut known_subscription_rooms = BTreeSet::new();
|
||||||
for (room_id, room) in &body.room_subscriptions {
|
for (room_id, room) in &body.room_subscriptions {
|
||||||
|
if !services().rooms.metadata.exists(room_id)? {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
let todo_room = todo_rooms
|
let todo_room = todo_rooms
|
||||||
.entry(room_id.clone())
|
.entry(room_id.clone())
|
||||||
.or_insert((BTreeSet::new(), 0, u64::MAX));
|
.or_insert((BTreeSet::new(), 0, u64::MAX));
|
||||||
|
@ -1523,7 +1543,7 @@ pub async fn sync_events_v4_route(
|
||||||
let roomsincecount = PduCount::Normal(*roomsince);
|
let roomsincecount = PduCount::Normal(*roomsince);
|
||||||
|
|
||||||
let (timeline_pdus, limited) =
|
let (timeline_pdus, limited) =
|
||||||
load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?;
|
load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?;
|
||||||
|
|
||||||
if roomsince != &0 && timeline_pdus.is_empty() {
|
if roomsince != &0 && timeline_pdus.is_empty() {
|
||||||
continue;
|
continue;
|
||||||
|
@ -1555,63 +1575,58 @@ pub async fn sync_events_v4_route(
|
||||||
|
|
||||||
let required_state = required_state_request
|
let required_state = required_state_request
|
||||||
.iter()
|
.iter()
|
||||||
.map(|state| {
|
.flat_map(|state| {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &state.0, &state.1)
|
.room_state_get(room_id, &state.0, &state.1)
|
||||||
|
.ok()
|
||||||
|
.flatten()
|
||||||
|
.map(|state| state.to_sync_state_event())
|
||||||
})
|
})
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.filter_map(|o| o)
|
|
||||||
.map(|state| state.to_sync_state_event())
|
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
// Heroes
|
// Heroes
|
||||||
let heroes = services()
|
let heroes = services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_members(&room_id)
|
.room_members(room_id)
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
.filter(|member| member != &sender_user)
|
.filter(|member| member != &sender_user)
|
||||||
.map(|member| {
|
.flat_map(|member| {
|
||||||
Ok::<_, Error>(
|
services()
|
||||||
services()
|
.rooms
|
||||||
.rooms
|
.state_accessor
|
||||||
.state_accessor
|
.get_member(room_id, &member)
|
||||||
.get_member(&room_id, &member)?
|
.ok()
|
||||||
.map(|memberevent| {
|
.flatten()
|
||||||
(
|
.map(|memberevent| {
|
||||||
memberevent
|
(
|
||||||
.displayname
|
memberevent
|
||||||
.unwrap_or_else(|| member.to_string()),
|
.displayname
|
||||||
memberevent.avatar_url,
|
.unwrap_or_else(|| member.to_string()),
|
||||||
)
|
memberevent.avatar_url,
|
||||||
}),
|
)
|
||||||
)
|
})
|
||||||
})
|
})
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.filter_map(|o| o)
|
|
||||||
.take(5)
|
.take(5)
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
let name = if heroes.len() > 1 {
|
let name = match &heroes[..] {
|
||||||
let last = heroes[0].0.clone();
|
[] => None,
|
||||||
Some(
|
[only] => Some(only.0.clone()),
|
||||||
heroes[1..]
|
[firsts @ .., last] => Some(
|
||||||
|
firsts
|
||||||
.iter()
|
.iter()
|
||||||
.map(|h| h.0.clone())
|
.map(|h| h.0.clone())
|
||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
.join(", ")
|
.join(", ")
|
||||||
+ " and "
|
+ " and "
|
||||||
+ &last,
|
+ &last.0,
|
||||||
)
|
),
|
||||||
} else if heroes.len() == 1 {
|
|
||||||
Some(heroes[0].0.clone())
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let avatar = if heroes.len() == 1 {
|
let avatar = if let [only] = &heroes[..] {
|
||||||
heroes[0].1.clone()
|
only.1.clone()
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
@ -1619,16 +1634,16 @@ pub async fn sync_events_v4_route(
|
||||||
rooms.insert(
|
rooms.insert(
|
||||||
room_id.clone(),
|
room_id.clone(),
|
||||||
sync_events::v4::SlidingSyncRoom {
|
sync_events::v4::SlidingSyncRoom {
|
||||||
name: services()
|
name: services().rooms.state_accessor.get_name(room_id)?.or(name),
|
||||||
.rooms
|
avatar: if let Some(avatar) = avatar {
|
||||||
.state_accessor
|
JsOption::Some(avatar)
|
||||||
.get_name(&room_id)?
|
} else {
|
||||||
.or_else(|| name),
|
match services().rooms.state_accessor.get_avatar(room_id)? {
|
||||||
avatar: services()
|
JsOption::Some(avatar) => JsOption::from_option(avatar.url),
|
||||||
.rooms
|
JsOption::Null => JsOption::Null,
|
||||||
.state_accessor
|
JsOption::Undefined => JsOption::Undefined,
|
||||||
.get_avatar(&room_id)?
|
}
|
||||||
.map_or(avatar, |a| a.url),
|
},
|
||||||
initial: Some(roomsince == &0),
|
initial: Some(roomsince == &0),
|
||||||
is_dm: None,
|
is_dm: None,
|
||||||
invite_state: None,
|
invite_state: None,
|
||||||
|
@ -1637,7 +1652,7 @@ pub async fn sync_events_v4_route(
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
.user
|
||||||
.highlight_count(&sender_user, &room_id)?
|
.highlight_count(&sender_user, room_id)?
|
||||||
.try_into()
|
.try_into()
|
||||||
.expect("notification count can't go that high"),
|
.expect("notification count can't go that high"),
|
||||||
),
|
),
|
||||||
|
@ -1645,7 +1660,7 @@ pub async fn sync_events_v4_route(
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
.user
|
||||||
.notification_count(&sender_user, &room_id)?
|
.notification_count(&sender_user, room_id)?
|
||||||
.try_into()
|
.try_into()
|
||||||
.expect("notification count can't go that high"),
|
.expect("notification count can't go that high"),
|
||||||
),
|
),
|
||||||
|
@ -1658,7 +1673,7 @@ pub async fn sync_events_v4_route(
|
||||||
(services()
|
(services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_joined_count(&room_id)?
|
.room_joined_count(room_id)?
|
||||||
.unwrap_or(0) as u32)
|
.unwrap_or(0) as u32)
|
||||||
.into(),
|
.into(),
|
||||||
),
|
),
|
||||||
|
@ -1666,7 +1681,7 @@ pub async fn sync_events_v4_route(
|
||||||
(services()
|
(services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_invited_count(&room_id)?
|
.room_invited_count(room_id)?
|
||||||
.unwrap_or(0) as u32)
|
.unwrap_or(0) as u32)
|
||||||
.into(),
|
.into(),
|
||||||
),
|
),
|
||||||
|
@ -1689,7 +1704,7 @@ pub async fn sync_events_v4_route(
|
||||||
let _ = tokio::time::timeout(duration, watcher).await;
|
let _ = tokio::time::timeout(duration, watcher).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(dbg!(sync_events::v4::Response {
|
Ok(sync_events::v4::Response {
|
||||||
initial: globalsince == 0,
|
initial: globalsince == 0,
|
||||||
txn_id: body.txn_id.clone(),
|
txn_id: body.txn_id.clone(),
|
||||||
pos: next_batch.to_string(),
|
pos: next_batch.to_string(),
|
||||||
|
@ -1744,5 +1759,5 @@ pub async fn sync_events_v4_route(
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
delta_token: None,
|
delta_token: None,
|
||||||
}))
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,17 +23,23 @@ pub async fn create_typing_event_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Typing::Yes(duration) = body.state {
|
if let Typing::Yes(duration) = body.state {
|
||||||
services().rooms.edus.typing.typing_add(
|
services()
|
||||||
sender_user,
|
.rooms
|
||||||
&body.room_id,
|
.edus
|
||||||
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
|
.typing
|
||||||
)?;
|
.typing_add(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
} else {
|
} else {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.edus
|
.edus
|
||||||
.typing
|
.typing
|
||||||
.typing_remove(sender_user, &body.room_id)?;
|
.typing_remove(sender_user, &body.room_id)
|
||||||
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(create_typing_event::v3::Response {})
|
Ok(create_typing_event::v3::Response {})
|
||||||
|
|
|
@ -26,6 +26,7 @@ pub async fn get_supported_versions_route(
|
||||||
"v1.2".to_owned(),
|
"v1.2".to_owned(),
|
||||||
"v1.3".to_owned(),
|
"v1.3".to_owned(),
|
||||||
"v1.4".to_owned(),
|
"v1.4".to_owned(),
|
||||||
|
"v1.5".to_owned(),
|
||||||
],
|
],
|
||||||
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
|
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
|
||||||
};
|
};
|
||||||
|
|
|
@ -48,6 +48,9 @@ pub async fn search_users_route(
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// It's a matching user, but is the sender allowed to see them?
|
||||||
|
let mut user_visible = false;
|
||||||
|
|
||||||
let user_is_in_public_rooms = services()
|
let user_is_in_public_rooms = services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
|
@ -69,22 +72,26 @@ pub async fn search_users_route(
|
||||||
});
|
});
|
||||||
|
|
||||||
if user_is_in_public_rooms {
|
if user_is_in_public_rooms {
|
||||||
return Some(user);
|
user_visible = true;
|
||||||
|
} else {
|
||||||
|
let user_is_in_shared_rooms = services()
|
||||||
|
.rooms
|
||||||
|
.user
|
||||||
|
.get_shared_rooms(vec![sender_user.clone(), user_id])
|
||||||
|
.ok()?
|
||||||
|
.next()
|
||||||
|
.is_some();
|
||||||
|
|
||||||
|
if user_is_in_shared_rooms {
|
||||||
|
user_visible = true;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let user_is_in_shared_rooms = services()
|
if !user_visible {
|
||||||
.rooms
|
return None;
|
||||||
.user
|
|
||||||
.get_shared_rooms(vec![sender_user.clone(), user_id])
|
|
||||||
.ok()?
|
|
||||||
.next()
|
|
||||||
.is_some();
|
|
||||||
|
|
||||||
if user_is_in_shared_rooms {
|
|
||||||
return Some(user);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
None
|
Some(user)
|
||||||
});
|
});
|
||||||
|
|
||||||
let results = users.by_ref().take(limit).collect();
|
let results = users.by_ref().take(limit).collect();
|
||||||
|
|
|
@ -15,13 +15,20 @@ use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||||
use http::{Request, StatusCode};
|
use http::{Request, StatusCode};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
|
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
|
||||||
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId,
|
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
|
||||||
};
|
};
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use tracing::{debug, error, warn};
|
use tracing::{debug, error, warn};
|
||||||
|
|
||||||
use super::{Ruma, RumaResponse};
|
use super::{Ruma, RumaResponse};
|
||||||
use crate::{services, Error, Result};
|
use crate::{service::appservice::RegistrationInfo, services, Error, Result};
|
||||||
|
|
||||||
|
enum Token {
|
||||||
|
Appservice(Box<RegistrationInfo>),
|
||||||
|
User((OwnedUserId, OwnedDeviceId)),
|
||||||
|
Invalid,
|
||||||
|
None,
|
||||||
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T, S, B> FromRequest<S, B> for Ruma<T>
|
impl<T, S, B> FromRequest<S, B> for Ruma<T>
|
||||||
|
@ -78,179 +85,192 @@ where
|
||||||
None => query_params.access_token.as_deref(),
|
None => query_params.access_token.as_deref(),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let token = if let Some(token) = token {
|
||||||
|
if let Some(reg_info) = services().appservice.find_from_token(token).await {
|
||||||
|
Token::Appservice(Box::new(reg_info.clone()))
|
||||||
|
} else if let Some((user_id, device_id)) = services().users.find_from_token(token)? {
|
||||||
|
Token::User((user_id, OwnedDeviceId::from(device_id)))
|
||||||
|
} else {
|
||||||
|
Token::Invalid
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Token::None
|
||||||
|
};
|
||||||
|
|
||||||
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
|
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
|
||||||
|
|
||||||
let appservices = services().appservice.all().unwrap();
|
|
||||||
let appservice_registration = appservices.iter().find(|(_id, registration)| {
|
|
||||||
registration
|
|
||||||
.get("as_token")
|
|
||||||
.and_then(|as_token| as_token.as_str())
|
|
||||||
.map_or(false, |as_token| token == Some(as_token))
|
|
||||||
});
|
|
||||||
|
|
||||||
let (sender_user, sender_device, sender_servername, from_appservice) =
|
let (sender_user, sender_device, sender_servername, from_appservice) =
|
||||||
if let Some((_id, registration)) = appservice_registration {
|
match (metadata.authentication, token) {
|
||||||
match metadata.authentication {
|
(_, Token::Invalid) => {
|
||||||
AuthScheme::AccessToken => {
|
return Err(Error::BadRequest(
|
||||||
let user_id = query_params.user_id.map_or_else(
|
ErrorKind::UnknownToken { soft_logout: false },
|
||||||
|
"Unknown access token.",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
(
|
||||||
|
AuthScheme::AccessToken
|
||||||
|
| AuthScheme::AppserviceToken
|
||||||
|
| AuthScheme::AccessTokenOptional
|
||||||
|
| AuthScheme::None,
|
||||||
|
Token::Appservice(info),
|
||||||
|
) => {
|
||||||
|
let user_id = query_params
|
||||||
|
.user_id
|
||||||
|
.map_or_else(
|
||||||
|| {
|
|| {
|
||||||
UserId::parse_with_server_name(
|
UserId::parse_with_server_name(
|
||||||
registration
|
info.registration.sender_localpart.as_str(),
|
||||||
.get("sender_localpart")
|
|
||||||
.unwrap()
|
|
||||||
.as_str()
|
|
||||||
.unwrap(),
|
|
||||||
services().globals.server_name(),
|
services().globals.server_name(),
|
||||||
)
|
)
|
||||||
.unwrap()
|
|
||||||
},
|
},
|
||||||
|s| UserId::parse(s).unwrap(),
|
UserId::parse,
|
||||||
);
|
)
|
||||||
|
.map_err(|_| {
|
||||||
|
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
||||||
|
})?;
|
||||||
|
if !services().users.exists(&user_id)? {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"User does not exist.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
if !services().users.exists(&user_id).unwrap() {
|
// TODO: Check if appservice is allowed to be that user
|
||||||
|
(Some(user_id), None, None, true)
|
||||||
|
}
|
||||||
|
(AuthScheme::AccessToken, Token::None) => {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::MissingToken,
|
||||||
|
"Missing access token.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
(
|
||||||
|
AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None,
|
||||||
|
Token::User((user_id, device_id)),
|
||||||
|
) => (Some(user_id), Some(device_id), None, false),
|
||||||
|
(AuthScheme::ServerSignatures, Token::None) => {
|
||||||
|
if !services().globals.allow_federation() {
|
||||||
|
return Err(Error::bad_config("Federation is disabled."));
|
||||||
|
}
|
||||||
|
|
||||||
|
let TypedHeader(Authorization(x_matrix)) = parts
|
||||||
|
.extract::<TypedHeader<Authorization<XMatrix>>>()
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
warn!("Missing or invalid Authorization header: {}", e);
|
||||||
|
|
||||||
|
let msg = match e.reason() {
|
||||||
|
TypedHeaderRejectionReason::Missing => {
|
||||||
|
"Missing Authorization header."
|
||||||
|
}
|
||||||
|
TypedHeaderRejectionReason::Error(_) => {
|
||||||
|
"Invalid X-Matrix signatures."
|
||||||
|
}
|
||||||
|
_ => "Unknown header-related error",
|
||||||
|
};
|
||||||
|
|
||||||
|
Error::BadRequest(ErrorKind::Forbidden, msg)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let origin_signatures = BTreeMap::from_iter([(
|
||||||
|
x_matrix.key.clone(),
|
||||||
|
CanonicalJsonValue::String(x_matrix.sig),
|
||||||
|
)]);
|
||||||
|
|
||||||
|
let signatures = BTreeMap::from_iter([(
|
||||||
|
x_matrix.origin.as_str().to_owned(),
|
||||||
|
CanonicalJsonValue::Object(origin_signatures),
|
||||||
|
)]);
|
||||||
|
|
||||||
|
let mut request_map = BTreeMap::from_iter([
|
||||||
|
(
|
||||||
|
"method".to_owned(),
|
||||||
|
CanonicalJsonValue::String(parts.method.to_string()),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"uri".to_owned(),
|
||||||
|
CanonicalJsonValue::String(parts.uri.to_string()),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"origin".to_owned(),
|
||||||
|
CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"destination".to_owned(),
|
||||||
|
CanonicalJsonValue::String(
|
||||||
|
services().globals.server_name().as_str().to_owned(),
|
||||||
|
),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"signatures".to_owned(),
|
||||||
|
CanonicalJsonValue::Object(signatures),
|
||||||
|
),
|
||||||
|
]);
|
||||||
|
|
||||||
|
if let Some(json_body) = &json_body {
|
||||||
|
request_map.insert("content".to_owned(), json_body.clone());
|
||||||
|
};
|
||||||
|
|
||||||
|
let keys_result = services()
|
||||||
|
.rooms
|
||||||
|
.event_handler
|
||||||
|
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let keys = match keys_result {
|
||||||
|
Ok(b) => b,
|
||||||
|
Err(e) => {
|
||||||
|
warn!("Failed to fetch signing keys: {}", e);
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"User does not exist.",
|
"Failed to fetch signing keys.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
};
|
||||||
|
|
||||||
// TODO: Check if appservice is allowed to be that user
|
let pub_key_map =
|
||||||
(Some(user_id), None, None, true)
|
BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
|
||||||
}
|
|
||||||
AuthScheme::ServerSignatures => (None, None, None, true),
|
|
||||||
AuthScheme::None => (None, None, None, true),
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
match metadata.authentication {
|
|
||||||
AuthScheme::AccessToken => {
|
|
||||||
let token = match token {
|
|
||||||
Some(token) => token,
|
|
||||||
_ => {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::MissingToken,
|
|
||||||
"Missing access token.",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
match services().users.find_from_token(token).unwrap() {
|
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
|
||||||
None => {
|
Ok(()) => (None, None, Some(x_matrix.origin), false),
|
||||||
return Err(Error::BadRequest(
|
Err(e) => {
|
||||||
ErrorKind::UnknownToken { soft_logout: false },
|
warn!(
|
||||||
"Unknown access token.",
|
"Failed to verify json request from {}: {}\n{:?}",
|
||||||
))
|
x_matrix.origin, e, request_map
|
||||||
}
|
);
|
||||||
Some((user_id, device_id)) => (
|
|
||||||
Some(user_id),
|
|
||||||
Some(OwnedDeviceId::from(device_id)),
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
AuthScheme::ServerSignatures => {
|
|
||||||
let TypedHeader(Authorization(x_matrix)) = parts
|
|
||||||
.extract::<TypedHeader<Authorization<XMatrix>>>()
|
|
||||||
.await
|
|
||||||
.map_err(|e| {
|
|
||||||
warn!("Missing or invalid Authorization header: {}", e);
|
|
||||||
|
|
||||||
let msg = match e.reason() {
|
if parts.uri.to_string().contains('@') {
|
||||||
TypedHeaderRejectionReason::Missing => {
|
|
||||||
"Missing Authorization header."
|
|
||||||
}
|
|
||||||
TypedHeaderRejectionReason::Error(_) => {
|
|
||||||
"Invalid X-Matrix signatures."
|
|
||||||
}
|
|
||||||
_ => "Unknown header-related error",
|
|
||||||
};
|
|
||||||
|
|
||||||
Error::BadRequest(ErrorKind::Forbidden, msg)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let origin_signatures = BTreeMap::from_iter([(
|
|
||||||
x_matrix.key.clone(),
|
|
||||||
CanonicalJsonValue::String(x_matrix.sig),
|
|
||||||
)]);
|
|
||||||
|
|
||||||
let signatures = BTreeMap::from_iter([(
|
|
||||||
x_matrix.origin.as_str().to_owned(),
|
|
||||||
CanonicalJsonValue::Object(origin_signatures),
|
|
||||||
)]);
|
|
||||||
|
|
||||||
let mut request_map = BTreeMap::from_iter([
|
|
||||||
(
|
|
||||||
"method".to_owned(),
|
|
||||||
CanonicalJsonValue::String(parts.method.to_string()),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"uri".to_owned(),
|
|
||||||
CanonicalJsonValue::String(parts.uri.to_string()),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"origin".to_owned(),
|
|
||||||
CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"destination".to_owned(),
|
|
||||||
CanonicalJsonValue::String(
|
|
||||||
services().globals.server_name().as_str().to_owned(),
|
|
||||||
),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"signatures".to_owned(),
|
|
||||||
CanonicalJsonValue::Object(signatures),
|
|
||||||
),
|
|
||||||
]);
|
|
||||||
|
|
||||||
if let Some(json_body) = &json_body {
|
|
||||||
request_map.insert("content".to_owned(), json_body.clone());
|
|
||||||
};
|
|
||||||
|
|
||||||
let keys_result = services()
|
|
||||||
.rooms
|
|
||||||
.event_handler
|
|
||||||
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
|
|
||||||
.await;
|
|
||||||
|
|
||||||
let keys = match keys_result {
|
|
||||||
Ok(b) => b,
|
|
||||||
Err(e) => {
|
|
||||||
warn!("Failed to fetch signing keys: {}", e);
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"Failed to fetch signing keys.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let pub_key_map =
|
|
||||||
BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
|
|
||||||
|
|
||||||
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
|
|
||||||
Ok(()) => (None, None, Some(x_matrix.origin), false),
|
|
||||||
Err(e) => {
|
|
||||||
warn!(
|
warn!(
|
||||||
"Failed to verify json request from {}: {}\n{:?}",
|
"Request uri contained '@' character. Make sure your \
|
||||||
x_matrix.origin, e, request_map
|
|
||||||
);
|
|
||||||
|
|
||||||
if parts.uri.to_string().contains('@') {
|
|
||||||
warn!(
|
|
||||||
"Request uri contained '@' character. Make sure your \
|
|
||||||
reverse proxy gives Conduit the raw uri (apache: use \
|
reverse proxy gives Conduit the raw uri (apache: use \
|
||||||
nocanon)"
|
nocanon)"
|
||||||
);
|
);
|
||||||
}
|
|
||||||
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"Failed to verify X-Matrix signatures.",
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"Failed to verify X-Matrix signatures.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
AuthScheme::None => (None, None, None, false),
|
}
|
||||||
|
(
|
||||||
|
AuthScheme::None
|
||||||
|
| AuthScheme::AppserviceToken
|
||||||
|
| AuthScheme::AccessTokenOptional,
|
||||||
|
Token::None,
|
||||||
|
) => (None, None, None, false),
|
||||||
|
(AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Unauthorized,
|
||||||
|
"Only server signatures should be used on this endpoint.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
(AuthScheme::AppserviceToken, Token::User(_)) => {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Unauthorized,
|
||||||
|
"Only appservice access tokens should be used on this endpoint.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -51,11 +51,12 @@ use std::{
|
||||||
fmt::Debug,
|
fmt::Debug,
|
||||||
mem,
|
mem,
|
||||||
net::{IpAddr, SocketAddr},
|
net::{IpAddr, SocketAddr},
|
||||||
sync::{Arc, RwLock},
|
sync::Arc,
|
||||||
time::{Duration, Instant, SystemTime},
|
time::{Duration, Instant, SystemTime},
|
||||||
};
|
};
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
use tracing::{debug, error, trace, warn};
|
use tracing::{debug, error, warn};
|
||||||
|
|
||||||
/// Wraps either an literal IP address plus port, or a hostname plus complement
|
/// Wraps either an literal IP address plus port, or a hostname plus complement
|
||||||
/// (colon-plus-port if it was specified).
|
/// (colon-plus-port if it was specified).
|
||||||
|
@ -137,7 +138,7 @@ where
|
||||||
.globals
|
.globals
|
||||||
.actual_destination_cache
|
.actual_destination_cache
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.await
|
||||||
.get(destination)
|
.get(destination)
|
||||||
.cloned();
|
.cloned();
|
||||||
|
|
||||||
|
@ -232,8 +233,7 @@ where
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let reqwest_request = reqwest::Request::try_from(http_request)
|
let reqwest_request = reqwest::Request::try_from(http_request)?;
|
||||||
.expect("all http requests are valid reqwest requests");
|
|
||||||
|
|
||||||
let url = reqwest_request.url().clone();
|
let url = reqwest_request.url().clone();
|
||||||
|
|
||||||
|
@ -290,7 +290,7 @@ where
|
||||||
.globals
|
.globals
|
||||||
.actual_destination_cache
|
.actual_destination_cache
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.insert(
|
.insert(
|
||||||
OwnedServerName::from(destination),
|
OwnedServerName::from(destination),
|
||||||
(actual_destination, host),
|
(actual_destination, host),
|
||||||
|
@ -341,7 +341,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns: actual_destination, host header
|
/// Returns: actual_destination, host header
|
||||||
/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names
|
/// Implemented according to the specification at <https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names>
|
||||||
/// Numbers in comments below refer to bullet points in linked section of specification
|
/// Numbers in comments below refer to bullet points in linked section of specification
|
||||||
async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
|
async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
|
||||||
debug!("Finding actual destination for {destination}");
|
debug!("Finding actual destination for {destination}");
|
||||||
|
@ -475,12 +475,11 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe
|
||||||
(actual_destination, hostname)
|
(actual_destination, hostname)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
|
async fn query_given_srv_record(record: &str) -> Option<FedDest> {
|
||||||
let hostname = hostname.trim_end_matches('.');
|
services()
|
||||||
if let Ok(Some(host_port)) = services()
|
|
||||||
.globals
|
.globals
|
||||||
.dns_resolver()
|
.dns_resolver()
|
||||||
.srv_lookup(format!("_matrix._tcp.{hostname}."))
|
.srv_lookup(record)
|
||||||
.await
|
.await
|
||||||
.map(|srv| {
|
.map(|srv| {
|
||||||
srv.iter().next().map(|result| {
|
srv.iter().next().map(|result| {
|
||||||
|
@ -490,10 +489,17 @@ async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
.unwrap_or(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
|
||||||
|
let hostname = hostname.trim_end_matches('.');
|
||||||
|
|
||||||
|
if let Some(host_port) = query_given_srv_record(&format!("_matrix-fed._tcp.{hostname}.")).await
|
||||||
{
|
{
|
||||||
Some(host_port)
|
Some(host_port)
|
||||||
} else {
|
} else {
|
||||||
None
|
query_given_srv_record(&format!("_matrix._tcp.{hostname}.")).await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -600,10 +606,6 @@ pub async fn get_server_keys_deprecated_route() -> impl IntoResponse {
|
||||||
pub async fn get_public_rooms_filtered_route(
|
pub async fn get_public_rooms_filtered_route(
|
||||||
body: Ruma<get_public_rooms_filtered::v1::Request>,
|
body: Ruma<get_public_rooms_filtered::v1::Request>,
|
||||||
) -> Result<get_public_rooms_filtered::v1::Response> {
|
) -> Result<get_public_rooms_filtered::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let response = client_server::get_public_rooms_filtered_helper(
|
let response = client_server::get_public_rooms_filtered_helper(
|
||||||
None,
|
None,
|
||||||
body.limit,
|
body.limit,
|
||||||
|
@ -627,10 +629,6 @@ pub async fn get_public_rooms_filtered_route(
|
||||||
pub async fn get_public_rooms_route(
|
pub async fn get_public_rooms_route(
|
||||||
body: Ruma<get_public_rooms::v1::Request>,
|
body: Ruma<get_public_rooms::v1::Request>,
|
||||||
) -> Result<get_public_rooms::v1::Response> {
|
) -> Result<get_public_rooms::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let response = client_server::get_public_rooms_filtered_helper(
|
let response = client_server::get_public_rooms_filtered_helper(
|
||||||
None,
|
None,
|
||||||
body.limit,
|
body.limit,
|
||||||
|
@ -666,7 +664,7 @@ pub fn parse_incoming_pdu(
|
||||||
|
|
||||||
let room_version_id = services().rooms.state.get_room_version(&room_id)?;
|
let room_version_id = services().rooms.state.get_room_version(&room_id)?;
|
||||||
|
|
||||||
let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) {
|
let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) {
|
||||||
Ok(t) => t,
|
Ok(t) => t,
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
// Event could not be converted to canonical json
|
// Event could not be converted to canonical json
|
||||||
|
@ -685,10 +683,6 @@ pub fn parse_incoming_pdu(
|
||||||
pub async fn send_transaction_message_route(
|
pub async fn send_transaction_message_route(
|
||||||
body: Ruma<send_transaction_message::v1::Request>,
|
body: Ruma<send_transaction_message::v1::Request>,
|
||||||
) -> Result<send_transaction_message::v1::Response> {
|
) -> Result<send_transaction_message::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let sender_servername = body
|
let sender_servername = body
|
||||||
.sender_servername
|
.sender_servername
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -724,7 +718,7 @@ pub async fn send_transaction_message_route(
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let r = parse_incoming_pdu(&pdu);
|
let r = parse_incoming_pdu(pdu);
|
||||||
let (event_id, value, room_id) = match r {
|
let (event_id, value, room_id) = match r {
|
||||||
Ok(t) => t,
|
Ok(t) => t,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
@ -740,7 +734,7 @@ pub async fn send_transaction_message_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_federation
|
.roomid_mutex_federation
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -837,17 +831,23 @@ pub async fn send_transaction_message_route(
|
||||||
.is_joined(&typing.user_id, &typing.room_id)?
|
.is_joined(&typing.user_id, &typing.room_id)?
|
||||||
{
|
{
|
||||||
if typing.typing {
|
if typing.typing {
|
||||||
services().rooms.edus.typing.typing_add(
|
services()
|
||||||
&typing.user_id,
|
.rooms
|
||||||
&typing.room_id,
|
.edus
|
||||||
3000 + utils::millis_since_unix_epoch(),
|
.typing
|
||||||
)?;
|
.typing_add(
|
||||||
|
&typing.user_id,
|
||||||
|
&typing.room_id,
|
||||||
|
3000 + utils::millis_since_unix_epoch(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
} else {
|
} else {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.edus
|
.edus
|
||||||
.typing
|
.typing
|
||||||
.typing_remove(&typing.user_id, &typing.room_id)?;
|
.typing_remove(&typing.user_id, &typing.room_id)
|
||||||
|
.await?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -953,10 +953,6 @@ pub async fn send_transaction_message_route(
|
||||||
pub async fn get_event_route(
|
pub async fn get_event_route(
|
||||||
body: Ruma<get_event::v1::Request>,
|
body: Ruma<get_event::v1::Request>,
|
||||||
) -> Result<get_event::v1::Response> {
|
) -> Result<get_event::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let sender_servername = body
|
let sender_servername = body
|
||||||
.sender_servername
|
.sender_servername
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -992,7 +988,7 @@ pub async fn get_event_route(
|
||||||
|
|
||||||
if !services().rooms.state_accessor.server_can_see_event(
|
if !services().rooms.state_accessor.server_can_see_event(
|
||||||
sender_servername,
|
sender_servername,
|
||||||
&room_id,
|
room_id,
|
||||||
&body.event_id,
|
&body.event_id,
|
||||||
)? {
|
)? {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
|
@ -1015,10 +1011,6 @@ pub async fn get_event_route(
|
||||||
pub async fn get_backfill_route(
|
pub async fn get_backfill_route(
|
||||||
body: Ruma<get_backfill::v1::Request>,
|
body: Ruma<get_backfill::v1::Request>,
|
||||||
) -> Result<get_backfill::v1::Response> {
|
) -> Result<get_backfill::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let sender_servername = body
|
let sender_servername = body
|
||||||
.sender_servername
|
.sender_servername
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -1058,7 +1050,7 @@ pub async fn get_backfill_route(
|
||||||
let all_events = services()
|
let all_events = services()
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.timeline
|
||||||
.pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
|
.pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
|
||||||
.take(limit.try_into().unwrap());
|
.take(limit.try_into().unwrap());
|
||||||
|
|
||||||
let events = all_events
|
let events = all_events
|
||||||
|
@ -1075,7 +1067,7 @@ pub async fn get_backfill_route(
|
||||||
})
|
})
|
||||||
.map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id))
|
.map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id))
|
||||||
.filter_map(|r| r.ok().flatten())
|
.filter_map(|r| r.ok().flatten())
|
||||||
.map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu))
|
.map(PduEvent::convert_to_outgoing_federation_event)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
Ok(get_backfill::v1::Response {
|
Ok(get_backfill::v1::Response {
|
||||||
|
@ -1091,10 +1083,6 @@ pub async fn get_backfill_route(
|
||||||
pub async fn get_missing_events_route(
|
pub async fn get_missing_events_route(
|
||||||
body: Ruma<get_missing_events::v1::Request>,
|
body: Ruma<get_missing_events::v1::Request>,
|
||||||
) -> Result<get_missing_events::v1::Response> {
|
) -> Result<get_missing_events::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let sender_servername = body
|
let sender_servername = body
|
||||||
.sender_servername
|
.sender_servername
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -1180,10 +1168,6 @@ pub async fn get_missing_events_route(
|
||||||
pub async fn get_event_authorization_route(
|
pub async fn get_event_authorization_route(
|
||||||
body: Ruma<get_event_authorization::v1::Request>,
|
body: Ruma<get_event_authorization::v1::Request>,
|
||||||
) -> Result<get_event_authorization::v1::Response> {
|
) -> Result<get_event_authorization::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let sender_servername = body
|
let sender_servername = body
|
||||||
.sender_servername
|
.sender_servername
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -1242,10 +1226,6 @@ pub async fn get_event_authorization_route(
|
||||||
pub async fn get_room_state_route(
|
pub async fn get_room_state_route(
|
||||||
body: Ruma<get_room_state::v1::Request>,
|
body: Ruma<get_room_state::v1::Request>,
|
||||||
) -> Result<get_room_state::v1::Response> {
|
) -> Result<get_room_state::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let sender_servername = body
|
let sender_servername = body
|
||||||
.sender_servername
|
.sender_servername
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -1322,10 +1302,6 @@ pub async fn get_room_state_route(
|
||||||
pub async fn get_room_state_ids_route(
|
pub async fn get_room_state_ids_route(
|
||||||
body: Ruma<get_room_state_ids::v1::Request>,
|
body: Ruma<get_room_state_ids::v1::Request>,
|
||||||
) -> Result<get_room_state_ids::v1::Response> {
|
) -> Result<get_room_state_ids::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let sender_servername = body
|
let sender_servername = body
|
||||||
.sender_servername
|
.sender_servername
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -1383,10 +1359,6 @@ pub async fn get_room_state_ids_route(
|
||||||
pub async fn create_join_event_template_route(
|
pub async fn create_join_event_template_route(
|
||||||
body: Ruma<prepare_join_event::v1::Request>,
|
body: Ruma<prepare_join_event::v1::Request>,
|
||||||
) -> Result<prepare_join_event::v1::Response> {
|
) -> Result<prepare_join_event::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !services().rooms.metadata.exists(&body.room_id)? {
|
if !services().rooms.metadata.exists(&body.room_id)? {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
|
@ -1409,7 +1381,7 @@ pub async fn create_join_event_template_route(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(body.room_id.to_owned())
|
.entry(body.room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -1494,10 +1466,6 @@ async fn create_join_event(
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
pdu: &RawJsonValue,
|
pdu: &RawJsonValue,
|
||||||
) -> Result<create_join_event::v1::RoomState> {
|
) -> Result<create_join_event::v1::RoomState> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !services().rooms.metadata.exists(room_id)? {
|
if !services().rooms.metadata.exists(room_id)? {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
|
@ -1579,7 +1547,7 @@ async fn create_join_event(
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_federation
|
.roomid_mutex_federation
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -1678,10 +1646,6 @@ pub async fn create_join_event_v2_route(
|
||||||
pub async fn create_invite_route(
|
pub async fn create_invite_route(
|
||||||
body: Ruma<create_invite::v2::Request>,
|
body: Ruma<create_invite::v2::Request>,
|
||||||
) -> Result<create_invite::v2::Response> {
|
) -> Result<create_invite::v2::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let sender_servername = body
|
let sender_servername = body
|
||||||
.sender_servername
|
.sender_servername
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -1795,8 +1759,11 @@ pub async fn create_invite_route(
|
||||||
pub async fn get_devices_route(
|
pub async fn get_devices_route(
|
||||||
body: Ruma<get_devices::v1::Request>,
|
body: Ruma<get_devices::v1::Request>,
|
||||||
) -> Result<get_devices::v1::Response> {
|
) -> Result<get_devices::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
if body.user_id.server_name() != services().globals.server_name() {
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Tried to access user from other server.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let sender_servername = body
|
let sender_servername = body
|
||||||
|
@ -1844,10 +1811,6 @@ pub async fn get_devices_route(
|
||||||
pub async fn get_room_information_route(
|
pub async fn get_room_information_route(
|
||||||
body: Ruma<get_room_information::v1::Request>,
|
body: Ruma<get_room_information::v1::Request>,
|
||||||
) -> Result<get_room_information::v1::Response> {
|
) -> Result<get_room_information::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
|
||||||
}
|
|
||||||
|
|
||||||
let room_id = services()
|
let room_id = services()
|
||||||
.rooms
|
.rooms
|
||||||
.alias
|
.alias
|
||||||
|
@ -1869,8 +1832,11 @@ pub async fn get_room_information_route(
|
||||||
pub async fn get_profile_information_route(
|
pub async fn get_profile_information_route(
|
||||||
body: Ruma<get_profile_information::v1::Request>,
|
body: Ruma<get_profile_information::v1::Request>,
|
||||||
) -> Result<get_profile_information::v1::Response> {
|
) -> Result<get_profile_information::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
if body.user_id.server_name() != services().globals.server_name() {
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Tried to access user from other server.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut displayname = None;
|
let mut displayname = None;
|
||||||
|
@ -1905,8 +1871,15 @@ pub async fn get_profile_information_route(
|
||||||
///
|
///
|
||||||
/// Gets devices and identity keys for the given users.
|
/// Gets devices and identity keys for the given users.
|
||||||
pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_keys::v1::Response> {
|
pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_keys::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
if body
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
.device_keys
|
||||||
|
.iter()
|
||||||
|
.any(|(u, _)| u.server_name() != services().globals.server_name())
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Tried to access user from other server.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let result = get_keys_helper(None, &body.device_keys, |u| {
|
let result = get_keys_helper(None, &body.device_keys, |u| {
|
||||||
|
@ -1927,8 +1900,15 @@ pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_key
|
||||||
pub async fn claim_keys_route(
|
pub async fn claim_keys_route(
|
||||||
body: Ruma<claim_keys::v1::Request>,
|
body: Ruma<claim_keys::v1::Request>,
|
||||||
) -> Result<claim_keys::v1::Response> {
|
) -> Result<claim_keys::v1::Response> {
|
||||||
if !services().globals.allow_federation() {
|
if body
|
||||||
return Err(Error::bad_config("Federation is disabled."));
|
.one_time_keys
|
||||||
|
.iter()
|
||||||
|
.any(|(u, _)| u.server_name() != services().globals.server_name())
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Tried to access user from other server.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let result = claim_keys_helper(&body.one_time_keys).await?;
|
let result = claim_keys_helper(&body.one_time_keys).await?;
|
||||||
|
|
27
src/clap.rs
Normal file
27
src/clap.rs
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
//! Integration with `clap`
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
|
||||||
|
/// Returns the current version of the crate with extra info if supplied
|
||||||
|
///
|
||||||
|
/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to
|
||||||
|
/// include it in parenthesis after the SemVer version. A common value are git
|
||||||
|
/// commit hashes.
|
||||||
|
fn version() -> String {
|
||||||
|
let cargo_pkg_version = env!("CARGO_PKG_VERSION");
|
||||||
|
|
||||||
|
match option_env!("CONDUIT_VERSION_EXTRA") {
|
||||||
|
Some(x) => format!("{} ({})", cargo_pkg_version, x),
|
||||||
|
None => cargo_pkg_version.to_owned(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Command line arguments
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[clap(about, version = version())]
|
||||||
|
pub struct Args {}
|
||||||
|
|
||||||
|
/// Parse command line arguments into structured data
|
||||||
|
pub fn parse() -> Args {
|
||||||
|
Args::parse()
|
||||||
|
}
|
|
@ -264,7 +264,7 @@ fn default_trusted_servers() -> Vec<OwnedServerName> {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_log() -> String {
|
fn default_log() -> String {
|
||||||
"warn,state_res=warn,_=off,sled=off".to_owned()
|
"warn,state_res=warn,_=off".to_owned()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_turn_ttl() -> u64 {
|
fn default_turn_ttl() -> u64 {
|
||||||
|
|
|
@ -29,7 +29,9 @@ use crate::Result;
|
||||||
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
|
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
|
||||||
#[derive(Clone, Debug, Deserialize)]
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
#[serde(rename_all = "snake_case")]
|
#[serde(rename_all = "snake_case")]
|
||||||
|
#[derive(Default)]
|
||||||
pub enum ProxyConfig {
|
pub enum ProxyConfig {
|
||||||
|
#[default]
|
||||||
None,
|
None,
|
||||||
Global {
|
Global {
|
||||||
#[serde(deserialize_with = "crate::utils::deserialize_from_str")]
|
#[serde(deserialize_with = "crate::utils::deserialize_from_str")]
|
||||||
|
@ -48,11 +50,6 @@ impl ProxyConfig {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl Default for ProxyConfig {
|
|
||||||
fn default() -> Self {
|
|
||||||
ProxyConfig::None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize)]
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
pub struct PartialProxyConfig {
|
pub struct PartialProxyConfig {
|
||||||
|
|
|
@ -116,7 +116,7 @@ impl KvTree for PersyTree {
|
||||||
match iter {
|
match iter {
|
||||||
Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
|
Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
|
||||||
v.into_iter()
|
v.into_iter()
|
||||||
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
|
.map(|val| ((*k).to_owned(), (*val).to_owned()))
|
||||||
.next()
|
.next()
|
||||||
})),
|
})),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
@ -142,7 +142,7 @@ impl KvTree for PersyTree {
|
||||||
Ok(iter) => {
|
Ok(iter) => {
|
||||||
let map = iter.filter_map(|(k, v)| {
|
let map = iter.filter_map(|(k, v)| {
|
||||||
v.into_iter()
|
v.into_iter()
|
||||||
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
|
.map(|val| ((*k).to_owned(), (*val).to_owned()))
|
||||||
.next()
|
.next()
|
||||||
});
|
});
|
||||||
if backwards {
|
if backwards {
|
||||||
|
@ -179,7 +179,7 @@ impl KvTree for PersyTree {
|
||||||
iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
|
iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
|
||||||
.filter_map(|(k, v)| {
|
.filter_map(|(k, v)| {
|
||||||
v.into_iter()
|
v.into_iter()
|
||||||
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
|
.map(|val| ((*k).to_owned(), (*val).to_owned()))
|
||||||
.next()
|
.next()
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|
|
@ -23,32 +23,29 @@ pub struct RocksDbEngineTree<'a> {
|
||||||
fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options {
|
fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options {
|
||||||
let mut block_based_options = rocksdb::BlockBasedOptions::default();
|
let mut block_based_options = rocksdb::BlockBasedOptions::default();
|
||||||
block_based_options.set_block_cache(rocksdb_cache);
|
block_based_options.set_block_cache(rocksdb_cache);
|
||||||
|
block_based_options.set_bloom_filter(10.0, false);
|
||||||
// "Difference of spinning disk"
|
|
||||||
// https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html
|
|
||||||
block_based_options.set_block_size(4 * 1024);
|
block_based_options.set_block_size(4 * 1024);
|
||||||
block_based_options.set_cache_index_and_filter_blocks(true);
|
block_based_options.set_cache_index_and_filter_blocks(true);
|
||||||
|
block_based_options.set_pin_l0_filter_and_index_blocks_in_cache(true);
|
||||||
|
block_based_options.set_optimize_filters_for_memory(true);
|
||||||
|
|
||||||
let mut db_opts = rocksdb::Options::default();
|
let mut db_opts = rocksdb::Options::default();
|
||||||
db_opts.set_block_based_table_factory(&block_based_options);
|
db_opts.set_block_based_table_factory(&block_based_options);
|
||||||
db_opts.set_optimize_filters_for_hits(true);
|
|
||||||
db_opts.set_skip_stats_update_on_db_open(true);
|
|
||||||
db_opts.set_level_compaction_dynamic_level_bytes(true);
|
|
||||||
db_opts.set_target_file_size_base(256 * 1024 * 1024);
|
|
||||||
//db_opts.set_compaction_readahead_size(2 * 1024 * 1024);
|
|
||||||
//db_opts.set_use_direct_reads(true);
|
|
||||||
//db_opts.set_use_direct_io_for_flush_and_compaction(true);
|
|
||||||
db_opts.create_if_missing(true);
|
db_opts.create_if_missing(true);
|
||||||
db_opts.increase_parallelism(num_cpus::get() as i32);
|
db_opts.increase_parallelism(num_cpus::get() as i32);
|
||||||
db_opts.set_max_open_files(max_open_files);
|
db_opts.set_max_open_files(max_open_files);
|
||||||
db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd);
|
db_opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
|
||||||
|
db_opts.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd);
|
||||||
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
|
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
|
||||||
db_opts.optimize_level_style_compaction(10 * 1024 * 1024);
|
|
||||||
|
|
||||||
// https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
|
// https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
|
||||||
|
db_opts.set_level_compaction_dynamic_level_bytes(true);
|
||||||
db_opts.set_max_background_jobs(6);
|
db_opts.set_max_background_jobs(6);
|
||||||
db_opts.set_bytes_per_sync(1048576);
|
db_opts.set_bytes_per_sync(1048576);
|
||||||
|
|
||||||
|
// https://github.com/facebook/rocksdb/issues/849
|
||||||
|
db_opts.set_keep_log_file_num(100);
|
||||||
|
|
||||||
// https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords
|
// https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords
|
||||||
//
|
//
|
||||||
// Unclean shutdowns of a Matrix homeserver are likely to be fine when
|
// Unclean shutdowns of a Matrix homeserver are likely to be fine when
|
||||||
|
@ -56,9 +53,6 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
|
||||||
// restored via federation.
|
// restored via federation.
|
||||||
db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords);
|
db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords);
|
||||||
|
|
||||||
let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
|
|
||||||
db_opts.set_prefix_extractor(prefix_extractor);
|
|
||||||
|
|
||||||
db_opts
|
db_opts
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -144,12 +138,17 @@ impl RocksDbEngineTree<'_> {
|
||||||
|
|
||||||
impl KvTree for RocksDbEngineTree<'_> {
|
impl KvTree for RocksDbEngineTree<'_> {
|
||||||
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
||||||
Ok(self.db.rocks.get_cf(&self.cf(), key)?)
|
let readoptions = rocksdb::ReadOptions::default();
|
||||||
|
|
||||||
|
Ok(self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
|
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
|
||||||
|
let writeoptions = rocksdb::WriteOptions::default();
|
||||||
let lock = self.write_lock.read().unwrap();
|
let lock = self.write_lock.read().unwrap();
|
||||||
self.db.rocks.put_cf(&self.cf(), key, value)?;
|
self.db
|
||||||
|
.rocks
|
||||||
|
.put_cf_opt(&self.cf(), key, value, &writeoptions)?;
|
||||||
drop(lock);
|
drop(lock);
|
||||||
|
|
||||||
self.watchers.wake(key);
|
self.watchers.wake(key);
|
||||||
|
@ -158,22 +157,31 @@ impl KvTree for RocksDbEngineTree<'_> {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
|
fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
|
||||||
|
let writeoptions = rocksdb::WriteOptions::default();
|
||||||
for (key, value) in iter {
|
for (key, value) in iter {
|
||||||
self.db.rocks.put_cf(&self.cf(), key, value)?;
|
self.db
|
||||||
|
.rocks
|
||||||
|
.put_cf_opt(&self.cf(), key, value, &writeoptions)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn remove(&self, key: &[u8]) -> Result<()> {
|
fn remove(&self, key: &[u8]) -> Result<()> {
|
||||||
Ok(self.db.rocks.delete_cf(&self.cf(), key)?)
|
let writeoptions = rocksdb::WriteOptions::default();
|
||||||
|
Ok(self
|
||||||
|
.db
|
||||||
|
.rocks
|
||||||
|
.delete_cf_opt(&self.cf(), key, &writeoptions)?)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
||||||
|
let readoptions = rocksdb::ReadOptions::default();
|
||||||
|
|
||||||
Box::new(
|
Box::new(
|
||||||
self.db
|
self.db
|
||||||
.rocks
|
.rocks
|
||||||
.iterator_cf(&self.cf(), rocksdb::IteratorMode::Start)
|
.iterator_cf_opt(&self.cf(), readoptions, rocksdb::IteratorMode::Start)
|
||||||
.map(|r| r.unwrap())
|
.map(|r| r.unwrap())
|
||||||
.map(|(k, v)| (Vec::from(k), Vec::from(v))),
|
.map(|(k, v)| (Vec::from(k), Vec::from(v))),
|
||||||
)
|
)
|
||||||
|
@ -184,11 +192,14 @@ impl KvTree for RocksDbEngineTree<'_> {
|
||||||
from: &[u8],
|
from: &[u8],
|
||||||
backwards: bool,
|
backwards: bool,
|
||||||
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
||||||
|
let readoptions = rocksdb::ReadOptions::default();
|
||||||
|
|
||||||
Box::new(
|
Box::new(
|
||||||
self.db
|
self.db
|
||||||
.rocks
|
.rocks
|
||||||
.iterator_cf(
|
.iterator_cf_opt(
|
||||||
&self.cf(),
|
&self.cf(),
|
||||||
|
readoptions,
|
||||||
rocksdb::IteratorMode::From(
|
rocksdb::IteratorMode::From(
|
||||||
from,
|
from,
|
||||||
if backwards {
|
if backwards {
|
||||||
|
@ -204,23 +215,33 @@ impl KvTree for RocksDbEngineTree<'_> {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
|
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
|
||||||
|
let readoptions = rocksdb::ReadOptions::default();
|
||||||
|
let writeoptions = rocksdb::WriteOptions::default();
|
||||||
|
|
||||||
let lock = self.write_lock.write().unwrap();
|
let lock = self.write_lock.write().unwrap();
|
||||||
|
|
||||||
let old = self.db.rocks.get_cf(&self.cf(), key)?;
|
let old = self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?;
|
||||||
let new = utils::increment(old.as_deref()).unwrap();
|
let new = utils::increment(old.as_deref()).unwrap();
|
||||||
self.db.rocks.put_cf(&self.cf(), key, &new)?;
|
self.db
|
||||||
|
.rocks
|
||||||
|
.put_cf_opt(&self.cf(), key, &new, &writeoptions)?;
|
||||||
|
|
||||||
drop(lock);
|
drop(lock);
|
||||||
Ok(new)
|
Ok(new)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
|
fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
|
||||||
|
let readoptions = rocksdb::ReadOptions::default();
|
||||||
|
let writeoptions = rocksdb::WriteOptions::default();
|
||||||
|
|
||||||
let lock = self.write_lock.write().unwrap();
|
let lock = self.write_lock.write().unwrap();
|
||||||
|
|
||||||
for key in iter {
|
for key in iter {
|
||||||
let old = self.db.rocks.get_cf(&self.cf(), &key)?;
|
let old = self.db.rocks.get_cf_opt(&self.cf(), &key, &readoptions)?;
|
||||||
let new = utils::increment(old.as_deref()).unwrap();
|
let new = utils::increment(old.as_deref()).unwrap();
|
||||||
self.db.rocks.put_cf(&self.cf(), key, new)?;
|
self.db
|
||||||
|
.rocks
|
||||||
|
.put_cf_opt(&self.cf(), key, new, &writeoptions)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
drop(lock);
|
drop(lock);
|
||||||
|
@ -232,11 +253,14 @@ impl KvTree for RocksDbEngineTree<'_> {
|
||||||
&'a self,
|
&'a self,
|
||||||
prefix: Vec<u8>,
|
prefix: Vec<u8>,
|
||||||
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
||||||
|
let readoptions = rocksdb::ReadOptions::default();
|
||||||
|
|
||||||
Box::new(
|
Box::new(
|
||||||
self.db
|
self.db
|
||||||
.rocks
|
.rocks
|
||||||
.iterator_cf(
|
.iterator_cf_opt(
|
||||||
&self.cf(),
|
&self.cf(),
|
||||||
|
readoptions,
|
||||||
rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
|
rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
|
||||||
)
|
)
|
||||||
.map(|r| r.unwrap())
|
.map(|r| r.unwrap())
|
||||||
|
|
|
@ -33,7 +33,7 @@ impl Iterator for PreparedStatementIterator<'_> {
|
||||||
struct NonAliasingBox<T>(*mut T);
|
struct NonAliasingBox<T>(*mut T);
|
||||||
impl<T> Drop for NonAliasingBox<T> {
|
impl<T> Drop for NonAliasingBox<T> {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
unsafe { Box::from_raw(self.0) };
|
drop(unsafe { Box::from_raw(self.0) });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -8,6 +8,7 @@ use tokio::sync::watch;
|
||||||
|
|
||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
pub(super) struct Watchers {
|
pub(super) struct Watchers {
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
|
watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -123,13 +123,12 @@ impl service::account_data::Data for KeyValueDatabase {
|
||||||
.take_while(move |(k, _)| k.starts_with(&prefix))
|
.take_while(move |(k, _)| k.starts_with(&prefix))
|
||||||
.map(|(k, v)| {
|
.map(|(k, v)| {
|
||||||
Ok::<_, Error>((
|
Ok::<_, Error>((
|
||||||
RoomAccountDataEventType::try_from(
|
RoomAccountDataEventType::from(
|
||||||
utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
|
utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
|
||||||
|| Error::bad_database("RoomUserData ID in db is invalid."),
|
|| Error::bad_database("RoomUserData ID in db is invalid."),
|
||||||
)?)
|
)?)
|
||||||
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
|
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
|
||||||
)
|
),
|
||||||
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
|
|
||||||
serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
|
serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
|
||||||
Error::bad_database("Database contains invalid account data.")
|
Error::bad_database("Database contains invalid account data.")
|
||||||
})?,
|
})?,
|
||||||
|
|
|
@ -1,18 +1,15 @@
|
||||||
|
use ruma::api::appservice::Registration;
|
||||||
|
|
||||||
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
||||||
|
|
||||||
impl service::appservice::Data for KeyValueDatabase {
|
impl service::appservice::Data for KeyValueDatabase {
|
||||||
/// Registers an appservice and returns the ID to the caller
|
/// Registers an appservice and returns the ID to the caller
|
||||||
fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
|
fn register_appservice(&self, yaml: Registration) -> Result<String> {
|
||||||
// TODO: Rumaify
|
let id = yaml.id.as_str();
|
||||||
let id = yaml.get("id").unwrap().as_str().unwrap();
|
|
||||||
self.id_appserviceregistrations.insert(
|
self.id_appserviceregistrations.insert(
|
||||||
id.as_bytes(),
|
id.as_bytes(),
|
||||||
serde_yaml::to_string(&yaml).unwrap().as_bytes(),
|
serde_yaml::to_string(&yaml).unwrap().as_bytes(),
|
||||||
)?;
|
)?;
|
||||||
self.cached_registrations
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.insert(id.to_owned(), yaml.to_owned());
|
|
||||||
|
|
||||||
Ok(id.to_owned())
|
Ok(id.to_owned())
|
||||||
}
|
}
|
||||||
|
@ -25,33 +22,18 @@ impl service::appservice::Data for KeyValueDatabase {
|
||||||
fn unregister_appservice(&self, service_name: &str) -> Result<()> {
|
fn unregister_appservice(&self, service_name: &str) -> Result<()> {
|
||||||
self.id_appserviceregistrations
|
self.id_appserviceregistrations
|
||||||
.remove(service_name.as_bytes())?;
|
.remove(service_name.as_bytes())?;
|
||||||
self.cached_registrations
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.remove(service_name);
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
|
fn get_registration(&self, id: &str) -> Result<Option<Registration>> {
|
||||||
self.cached_registrations
|
self.id_appserviceregistrations
|
||||||
.read()
|
.get(id.as_bytes())?
|
||||||
.unwrap()
|
.map(|bytes| {
|
||||||
.get(id)
|
serde_yaml::from_slice(&bytes).map_err(|_| {
|
||||||
.map_or_else(
|
Error::bad_database("Invalid registration bytes in id_appserviceregistrations.")
|
||||||
|| {
|
})
|
||||||
self.id_appserviceregistrations
|
})
|
||||||
.get(id.as_bytes())?
|
.transpose()
|
||||||
.map(|bytes| {
|
|
||||||
serde_yaml::from_slice(&bytes).map_err(|_| {
|
|
||||||
Error::bad_database(
|
|
||||||
"Invalid registration bytes in id_appserviceregistrations.",
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.transpose()
|
|
||||||
},
|
|
||||||
|r| Ok(Some(r.clone())),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>> {
|
fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>> {
|
||||||
|
@ -64,7 +46,7 @@ impl service::appservice::Data for KeyValueDatabase {
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
|
fn all(&self) -> Result<Vec<(String, Registration)>> {
|
||||||
self.iter_ids()?
|
self.iter_ids()?
|
||||||
.filter_map(|id| id.ok())
|
.filter_map(|id| id.ok())
|
||||||
.map(move |id| {
|
.map(move |id| {
|
||||||
|
|
|
@ -94,7 +94,9 @@ impl service::globals::Data for KeyValueDatabase {
|
||||||
futures.push(self.pduid_pdu.watch_prefix(&short_roomid));
|
futures.push(self.pduid_pdu.watch_prefix(&short_roomid));
|
||||||
|
|
||||||
// EDUs
|
// EDUs
|
||||||
futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes));
|
futures.push(Box::into_pin(Box::new(async move {
|
||||||
|
let _result = services().rooms.edus.typing.wait_for_update(&room_id).await;
|
||||||
|
})));
|
||||||
|
|
||||||
futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));
|
futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));
|
||||||
|
|
||||||
|
@ -256,8 +258,8 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
|
||||||
..
|
..
|
||||||
} = new_keys;
|
} = new_keys;
|
||||||
|
|
||||||
keys.verify_keys.extend(verify_keys.into_iter());
|
keys.verify_keys.extend(verify_keys);
|
||||||
keys.old_verify_keys.extend(old_verify_keys.into_iter());
|
keys.old_verify_keys.extend(old_verify_keys);
|
||||||
|
|
||||||
self.server_signingkeys.insert(
|
self.server_signingkeys.insert(
|
||||||
origin.as_bytes(),
|
origin.as_bytes(),
|
||||||
|
|
|
@ -1,6 +1,5 @@
|
||||||
mod presence;
|
mod presence;
|
||||||
mod read_receipt;
|
mod read_receipt;
|
||||||
mod typing;
|
|
||||||
|
|
||||||
use crate::{database::KeyValueDatabase, service};
|
use crate::{database::KeyValueDatabase, service};
|
||||||
|
|
||||||
|
|
|
@ -1,127 +0,0 @@
|
||||||
use std::{collections::HashSet, mem};
|
|
||||||
|
|
||||||
use ruma::{OwnedUserId, RoomId, UserId};
|
|
||||||
|
|
||||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
|
||||||
|
|
||||||
impl service::rooms::edus::typing::Data for KeyValueDatabase {
|
|
||||||
fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
|
|
||||||
let mut prefix = room_id.as_bytes().to_vec();
|
|
||||||
prefix.push(0xff);
|
|
||||||
|
|
||||||
let count = services().globals.next_count()?.to_be_bytes();
|
|
||||||
|
|
||||||
let mut room_typing_id = prefix;
|
|
||||||
room_typing_id.extend_from_slice(&timeout.to_be_bytes());
|
|
||||||
room_typing_id.push(0xff);
|
|
||||||
room_typing_id.extend_from_slice(&count);
|
|
||||||
|
|
||||||
self.typingid_userid
|
|
||||||
.insert(&room_typing_id, user_id.as_bytes())?;
|
|
||||||
|
|
||||||
self.roomid_lasttypingupdate
|
|
||||||
.insert(room_id.as_bytes(), &count)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
|
||||||
let mut prefix = room_id.as_bytes().to_vec();
|
|
||||||
prefix.push(0xff);
|
|
||||||
|
|
||||||
let user_id = user_id.to_string();
|
|
||||||
|
|
||||||
let mut found_outdated = false;
|
|
||||||
|
|
||||||
// Maybe there are multiple ones from calling roomtyping_add multiple times
|
|
||||||
for outdated_edu in self
|
|
||||||
.typingid_userid
|
|
||||||
.scan_prefix(prefix)
|
|
||||||
.filter(|(_, v)| &**v == user_id.as_bytes())
|
|
||||||
{
|
|
||||||
self.typingid_userid.remove(&outdated_edu.0)?;
|
|
||||||
found_outdated = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if found_outdated {
|
|
||||||
self.roomid_lasttypingupdate.insert(
|
|
||||||
room_id.as_bytes(),
|
|
||||||
&services().globals.next_count()?.to_be_bytes(),
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
|
|
||||||
let mut prefix = room_id.as_bytes().to_vec();
|
|
||||||
prefix.push(0xff);
|
|
||||||
|
|
||||||
let current_timestamp = utils::millis_since_unix_epoch();
|
|
||||||
|
|
||||||
let mut found_outdated = false;
|
|
||||||
|
|
||||||
// Find all outdated edus before inserting a new one
|
|
||||||
for outdated_edu in self
|
|
||||||
.typingid_userid
|
|
||||||
.scan_prefix(prefix)
|
|
||||||
.map(|(key, _)| {
|
|
||||||
Ok::<_, Error>((
|
|
||||||
key.clone(),
|
|
||||||
utils::u64_from_bytes(
|
|
||||||
&key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| {
|
|
||||||
Error::bad_database("RoomTyping has invalid timestamp or delimiters.")
|
|
||||||
})?[0..mem::size_of::<u64>()],
|
|
||||||
)
|
|
||||||
.map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?,
|
|
||||||
))
|
|
||||||
})
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.take_while(|&(_, timestamp)| timestamp < current_timestamp)
|
|
||||||
{
|
|
||||||
// This is an outdated edu (time > timestamp)
|
|
||||||
self.typingid_userid.remove(&outdated_edu.0)?;
|
|
||||||
found_outdated = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if found_outdated {
|
|
||||||
self.roomid_lasttypingupdate.insert(
|
|
||||||
room_id.as_bytes(),
|
|
||||||
&services().globals.next_count()?.to_be_bytes(),
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
|
|
||||||
Ok(self
|
|
||||||
.roomid_lasttypingupdate
|
|
||||||
.get(room_id.as_bytes())?
|
|
||||||
.map(|bytes| {
|
|
||||||
utils::u64_from_bytes(&bytes).map_err(|_| {
|
|
||||||
Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.transpose()?
|
|
||||||
.unwrap_or(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>> {
|
|
||||||
let mut prefix = room_id.as_bytes().to_vec();
|
|
||||||
prefix.push(0xff);
|
|
||||||
|
|
||||||
let mut user_ids = HashSet::new();
|
|
||||||
|
|
||||||
for (_, user_id) in self.typingid_userid.scan_prefix(prefix) {
|
|
||||||
let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| {
|
|
||||||
Error::bad_database("User ID in typingid_userid is invalid unicode.")
|
|
||||||
})?)
|
|
||||||
.map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?;
|
|
||||||
|
|
||||||
user_ids.insert(user_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(user_ids)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -157,10 +157,9 @@ impl service::rooms::short::Data for KeyValueDatabase {
|
||||||
.ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;
|
.ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;
|
||||||
|
|
||||||
let event_type =
|
let event_type =
|
||||||
StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
|
StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
|
||||||
Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
|
Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
|
||||||
})?)
|
})?);
|
||||||
.map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;
|
|
||||||
|
|
||||||
let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| {
|
let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| {
|
||||||
Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
|
Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
|
||||||
|
|
|
@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
|
||||||
let parsed = services()
|
let parsed = services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_compressor
|
.state_compressor
|
||||||
.parse_compressed_state_event(&compressed)?;
|
.parse_compressed_state_event(compressed)?;
|
||||||
result.insert(parsed.0, parsed.1);
|
result.insert(parsed.0, parsed.1);
|
||||||
|
|
||||||
i += 1;
|
i += 1;
|
||||||
|
@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
|
||||||
let (_, eventid) = services()
|
let (_, eventid) = services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_compressor
|
.state_compressor
|
||||||
.parse_compressed_state_event(&compressed)?;
|
.parse_compressed_state_event(compressed)?;
|
||||||
if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
|
if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
|
||||||
result.insert(
|
result.insert(
|
||||||
(
|
(
|
||||||
|
@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_compressor
|
.state_compressor
|
||||||
.parse_compressed_state_event(&compressed)
|
.parse_compressed_state_event(compressed)
|
||||||
.ok()
|
.ok()
|
||||||
.map(|(_, id)| id)
|
.map(|(_, id)| id)
|
||||||
}))
|
}))
|
||||||
|
|
|
@ -1,13 +1,16 @@
|
||||||
use std::{collections::HashSet, sync::Arc};
|
use std::{collections::HashSet, sync::Arc};
|
||||||
|
|
||||||
use regex::Regex;
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
events::{AnyStrippedStateEvent, AnySyncStateEvent},
|
events::{AnyStrippedStateEvent, AnySyncStateEvent},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
|
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
use crate::{
|
||||||
|
database::KeyValueDatabase,
|
||||||
|
service::{self, appservice::RegistrationInfo},
|
||||||
|
services, utils, Error, Result,
|
||||||
|
};
|
||||||
|
|
||||||
impl service::rooms::state_cache::Data for KeyValueDatabase {
|
impl service::rooms::state_cache::Data for KeyValueDatabase {
|
||||||
fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
||||||
|
@ -184,46 +187,28 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip(self, room_id, appservice))]
|
#[tracing::instrument(skip(self, room_id, appservice))]
|
||||||
fn appservice_in_room(
|
fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result<bool> {
|
||||||
&self,
|
|
||||||
room_id: &RoomId,
|
|
||||||
appservice: &(String, serde_yaml::Value),
|
|
||||||
) -> Result<bool> {
|
|
||||||
let maybe = self
|
let maybe = self
|
||||||
.appservice_in_room_cache
|
.appservice_in_room_cache
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.get(room_id)
|
.get(room_id)
|
||||||
.and_then(|map| map.get(&appservice.0))
|
.and_then(|map| map.get(&appservice.registration.id))
|
||||||
.copied();
|
.copied();
|
||||||
|
|
||||||
if let Some(b) = maybe {
|
if let Some(b) = maybe {
|
||||||
Ok(b)
|
Ok(b)
|
||||||
} else if let Some(namespaces) = appservice.1.get("namespaces") {
|
} else {
|
||||||
let users = namespaces
|
let bridge_user_id = UserId::parse_with_server_name(
|
||||||
.get("users")
|
appservice.registration.sender_localpart.as_str(),
|
||||||
.and_then(|users| users.as_sequence())
|
services().globals.server_name(),
|
||||||
.map_or_else(Vec::new, |users| {
|
)
|
||||||
users
|
.ok();
|
||||||
.iter()
|
|
||||||
.filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok())
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
});
|
|
||||||
|
|
||||||
let bridge_user_id = appservice
|
|
||||||
.1
|
|
||||||
.get("sender_localpart")
|
|
||||||
.and_then(|string| string.as_str())
|
|
||||||
.and_then(|string| {
|
|
||||||
UserId::parse_with_server_name(string, services().globals.server_name()).ok()
|
|
||||||
});
|
|
||||||
|
|
||||||
let in_room = bridge_user_id
|
let in_room = bridge_user_id
|
||||||
.map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false))
|
.map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false))
|
||||||
|| self.room_members(room_id).any(|userid| {
|
|| self.room_members(room_id).any(|userid| {
|
||||||
userid.map_or(false, |userid| {
|
userid.map_or(false, |userid| appservice.users.is_match(userid.as_str()))
|
||||||
users.iter().any(|r| r.is_match(userid.as_str()))
|
|
||||||
})
|
|
||||||
});
|
});
|
||||||
|
|
||||||
self.appservice_in_room_cache
|
self.appservice_in_room_cache
|
||||||
|
@ -231,11 +216,9 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default()
|
.or_default()
|
||||||
.insert(appservice.0.clone(), in_room);
|
.insert(appservice.registration.id.clone(), in_room);
|
||||||
|
|
||||||
Ok(in_room)
|
Ok(in_room)
|
||||||
} else {
|
|
||||||
Ok(false)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -471,6 +454,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns an iterator over all rooms a user was invited to.
|
/// Returns an iterator over all rooms a user was invited to.
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
#[tracing::instrument(skip(self))]
|
#[tracing::instrument(skip(self))]
|
||||||
fn rooms_invited<'a>(
|
fn rooms_invited<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
|
@ -549,6 +533,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns an iterator over all rooms a user left.
|
/// Returns an iterator over all rooms a user left.
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
#[tracing::instrument(skip(self))]
|
#[tracing::instrument(skip(self))]
|
||||||
fn rooms_left<'a>(
|
fn rooms_left<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
|
|
|
@ -10,7 +10,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
|
||||||
user_id: &'a UserId,
|
user_id: &'a UserId,
|
||||||
room_id: &'a RoomId,
|
room_id: &'a RoomId,
|
||||||
until: u64,
|
until: u64,
|
||||||
include: &'a IncludeThreads,
|
_include: &'a IncludeThreads,
|
||||||
) -> Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>> {
|
) -> Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>> {
|
||||||
let prefix = services()
|
let prefix = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -27,7 +27,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
|
||||||
self.threadid_userids
|
self.threadid_userids
|
||||||
.iter_from(¤t, true)
|
.iter_from(¤t, true)
|
||||||
.take_while(move |(k, _)| k.starts_with(&prefix))
|
.take_while(move |(k, _)| k.starts_with(&prefix))
|
||||||
.map(move |(pduid, users)| {
|
.map(move |(pduid, _users)| {
|
||||||
let count = utils::u64_from_bytes(&pduid[(mem::size_of::<u64>())..])
|
let count = utils::u64_from_bytes(&pduid[(mem::size_of::<u64>())..])
|
||||||
.map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?;
|
.map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?;
|
||||||
let mut pdu = services()
|
let mut pdu = services()
|
||||||
|
@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase {
|
||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
.join(&[0xff][..]);
|
.join(&[0xff][..]);
|
||||||
|
|
||||||
self.threadid_userids.insert(&root_id, &users)?;
|
self.threadid_userids.insert(root_id, &users)?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_participants(&self, root_id: &[u8]) -> Result<Option<Vec<OwnedUserId>>> {
|
fn get_participants(&self, root_id: &[u8]) -> Result<Option<Vec<OwnedUserId>>> {
|
||||||
if let Some(users) = self.threadid_userids.get(&root_id)? {
|
if let Some(users) = self.threadid_userids.get(root_id)? {
|
||||||
Ok(Some(
|
Ok(Some(
|
||||||
users
|
users
|
||||||
.split(|b| *b == 0xff)
|
.split(|b| *b == 0xff)
|
||||||
|
|
|
@ -39,11 +39,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
|
||||||
|
|
||||||
/// Returns the `count` of this pdu's id.
|
/// Returns the `count` of this pdu's id.
|
||||||
fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
|
fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
|
||||||
Ok(self
|
self.eventid_pduid
|
||||||
.eventid_pduid
|
|
||||||
.get(event_id.as_bytes())?
|
.get(event_id.as_bytes())?
|
||||||
.map(|pdu_id| pdu_count(&pdu_id))
|
.map(|pdu_id| pdu_count(&pdu_id))
|
||||||
.transpose()?)
|
.transpose()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the json of a pdu.
|
/// Returns the json of a pdu.
|
||||||
|
@ -80,7 +79,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
|
||||||
|
|
||||||
/// Returns the pdu's id.
|
/// Returns the pdu's id.
|
||||||
fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
|
fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
|
||||||
Ok(self.eventid_pduid.get(event_id.as_bytes())?)
|
self.eventid_pduid.get(event_id.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the pdu.
|
/// Returns the pdu.
|
||||||
|
@ -230,7 +229,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
until: PduCount,
|
until: PduCount,
|
||||||
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
|
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
|
||||||
let (prefix, current) = count_to_id(&room_id, until, 1, true)?;
|
let (prefix, current) = count_to_id(room_id, until, 1, true)?;
|
||||||
|
|
||||||
let user_id = user_id.to_owned();
|
let user_id = user_id.to_owned();
|
||||||
|
|
||||||
|
@ -257,7 +256,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
from: PduCount,
|
from: PduCount,
|
||||||
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
|
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
|
||||||
let (prefix, current) = count_to_id(&room_id, from, 1, false)?;
|
let (prefix, current) = count_to_id(room_id, from, 1, false)?;
|
||||||
|
|
||||||
let user_id = user_id.to_owned();
|
let user_id = user_id.to_owned();
|
||||||
|
|
||||||
|
@ -332,7 +331,7 @@ fn count_to_id(
|
||||||
.rooms
|
.rooms
|
||||||
.short
|
.short
|
||||||
.get_shortroomid(room_id)?
|
.get_shortroomid(room_id)?
|
||||||
.expect("room exists")
|
.ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))?
|
||||||
.to_be_bytes()
|
.to_be_bytes()
|
||||||
.to_vec();
|
.to_vec();
|
||||||
let mut pdu_id = prefix.clone();
|
let mut pdu_id = prefix.clone();
|
||||||
|
|
|
@ -146,10 +146,9 @@ impl service::users::Data for KeyValueDatabase {
|
||||||
self.userid_avatarurl
|
self.userid_avatarurl
|
||||||
.get(user_id.as_bytes())?
|
.get(user_id.as_bytes())?
|
||||||
.map(|bytes| {
|
.map(|bytes| {
|
||||||
let s = utils::string_from_bytes(&bytes)
|
utils::string_from_bytes(&bytes)
|
||||||
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
|
|
||||||
s.try_into()
|
|
||||||
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
|
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
|
||||||
|
.map(Into::into)
|
||||||
})
|
})
|
||||||
.transpose()
|
.transpose()
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,6 +8,7 @@ use crate::{
|
||||||
use abstraction::{KeyValueDatabaseEngine, KvTree};
|
use abstraction::{KeyValueDatabaseEngine, KvTree};
|
||||||
use directories::ProjectDirs;
|
use directories::ProjectDirs;
|
||||||
use lru_cache::LruCache;
|
use lru_cache::LruCache;
|
||||||
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
events::{
|
events::{
|
||||||
push_rules::{PushRulesEvent, PushRulesEventContent},
|
push_rules::{PushRulesEvent, PushRulesEventContent},
|
||||||
|
@ -70,8 +71,6 @@ pub struct KeyValueDatabase {
|
||||||
pub(super) readreceiptid_readreceipt: Arc<dyn KvTree>, // ReadReceiptId = RoomId + Count + UserId
|
pub(super) readreceiptid_readreceipt: Arc<dyn KvTree>, // ReadReceiptId = RoomId + Count + UserId
|
||||||
pub(super) roomuserid_privateread: Arc<dyn KvTree>, // RoomUserId = Room + User, PrivateRead = Count
|
pub(super) roomuserid_privateread: Arc<dyn KvTree>, // RoomUserId = Room + User, PrivateRead = Count
|
||||||
pub(super) roomuserid_lastprivatereadupdate: Arc<dyn KvTree>, // LastPrivateReadUpdate = Count
|
pub(super) roomuserid_lastprivatereadupdate: Arc<dyn KvTree>, // LastPrivateReadUpdate = Count
|
||||||
pub(super) typingid_userid: Arc<dyn KvTree>, // TypingId = RoomId + TimeoutTime + Count
|
|
||||||
pub(super) roomid_lasttypingupdate: Arc<dyn KvTree>, // LastRoomTypingUpdate = Count
|
|
||||||
pub(super) presenceid_presence: Arc<dyn KvTree>, // PresenceId = RoomId + Count + UserId
|
pub(super) presenceid_presence: Arc<dyn KvTree>, // PresenceId = RoomId + Count + UserId
|
||||||
pub(super) userid_lastpresenceupdate: Arc<dyn KvTree>, // LastPresenceUpdate = Count
|
pub(super) userid_lastpresenceupdate: Arc<dyn KvTree>, // LastPresenceUpdate = Count
|
||||||
|
|
||||||
|
@ -162,7 +161,6 @@ pub struct KeyValueDatabase {
|
||||||
//pub pusher: pusher::PushData,
|
//pub pusher: pusher::PushData,
|
||||||
pub(super) senderkey_pusher: Arc<dyn KvTree>,
|
pub(super) senderkey_pusher: Arc<dyn KvTree>,
|
||||||
|
|
||||||
pub(super) cached_registrations: Arc<RwLock<HashMap<String, serde_yaml::Value>>>,
|
|
||||||
pub(super) pdu_cache: Mutex<LruCache<OwnedEventId, Arc<PduEvent>>>,
|
pub(super) pdu_cache: Mutex<LruCache<OwnedEventId, Arc<PduEvent>>>,
|
||||||
pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
|
pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
|
||||||
pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,
|
pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,
|
||||||
|
@ -301,8 +299,6 @@ impl KeyValueDatabase {
|
||||||
roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt
|
roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt
|
||||||
roomuserid_lastprivatereadupdate: builder
|
roomuserid_lastprivatereadupdate: builder
|
||||||
.open_tree("roomuserid_lastprivatereadupdate")?,
|
.open_tree("roomuserid_lastprivatereadupdate")?,
|
||||||
typingid_userid: builder.open_tree("typingid_userid")?,
|
|
||||||
roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?,
|
|
||||||
presenceid_presence: builder.open_tree("presenceid_presence")?,
|
presenceid_presence: builder.open_tree("presenceid_presence")?,
|
||||||
userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
|
userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
|
||||||
pduid_pdu: builder.open_tree("pduid_pdu")?,
|
pduid_pdu: builder.open_tree("pduid_pdu")?,
|
||||||
|
@ -372,7 +368,6 @@ impl KeyValueDatabase {
|
||||||
global: builder.open_tree("global")?,
|
global: builder.open_tree("global")?,
|
||||||
server_signingkeys: builder.open_tree("server_signingkeys")?,
|
server_signingkeys: builder.open_tree("server_signingkeys")?,
|
||||||
|
|
||||||
cached_registrations: Arc::new(RwLock::new(HashMap::new())),
|
|
||||||
pdu_cache: Mutex::new(LruCache::new(
|
pdu_cache: Mutex::new(LruCache::new(
|
||||||
config
|
config
|
||||||
.pdu_cache_capacity
|
.pdu_cache_capacity
|
||||||
|
@ -852,7 +847,9 @@ impl KeyValueDatabase {
|
||||||
if rule.is_some() {
|
if rule.is_some() {
|
||||||
let mut rule = rule.unwrap().clone();
|
let mut rule = rule.unwrap().clone();
|
||||||
rule.rule_id = content_rule_transformation[1].to_owned();
|
rule.rule_id = content_rule_transformation[1].to_owned();
|
||||||
rules_list.content.remove(content_rule_transformation[0]);
|
rules_list
|
||||||
|
.content
|
||||||
|
.shift_remove(content_rule_transformation[0]);
|
||||||
rules_list.content.insert(rule);
|
rules_list.content.insert(rule);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -875,7 +872,7 @@ impl KeyValueDatabase {
|
||||||
if let Some(rule) = rule {
|
if let Some(rule) = rule {
|
||||||
let mut rule = rule.clone();
|
let mut rule = rule.clone();
|
||||||
rule.rule_id = transformation[1].to_owned();
|
rule.rule_id = transformation[1].to_owned();
|
||||||
rules_list.underride.remove(transformation[0]);
|
rules_list.underride.shift_remove(transformation[0]);
|
||||||
rules_list.underride.insert(rule);
|
rules_list.underride.insert(rule);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
13
src/lib.rs
13
src/lib.rs
|
@ -1,18 +1,13 @@
|
||||||
#![warn(
|
|
||||||
rust_2018_idioms,
|
|
||||||
unused_qualifications,
|
|
||||||
clippy::cloned_instead_of_copied,
|
|
||||||
clippy::str_to_string
|
|
||||||
)]
|
|
||||||
#![allow(clippy::suspicious_else_formatting)]
|
|
||||||
#![deny(clippy::dbg_macro)]
|
|
||||||
|
|
||||||
pub mod api;
|
pub mod api;
|
||||||
|
pub mod clap;
|
||||||
mod config;
|
mod config;
|
||||||
mod database;
|
mod database;
|
||||||
mod service;
|
mod service;
|
||||||
mod utils;
|
mod utils;
|
||||||
|
|
||||||
|
// Not async due to services() being used in many closures, and async closures are not stable as of writing
|
||||||
|
// This is the case for every other occurence of sync Mutex/RwLock, except for database related ones, where
|
||||||
|
// the current maintainer (Timo) has asked to not modify those
|
||||||
use std::sync::RwLock;
|
use std::sync::RwLock;
|
||||||
|
|
||||||
pub use api::ruma_wrapper::{Ruma, RumaResponse};
|
pub use api::ruma_wrapper::{Ruma, RumaResponse};
|
||||||
|
|
20
src/main.rs
20
src/main.rs
|
@ -1,13 +1,3 @@
|
||||||
#![warn(
|
|
||||||
rust_2018_idioms,
|
|
||||||
unused_qualifications,
|
|
||||||
clippy::cloned_instead_of_copied,
|
|
||||||
clippy::str_to_string,
|
|
||||||
clippy::future_not_send
|
|
||||||
)]
|
|
||||||
#![allow(clippy::suspicious_else_formatting)]
|
|
||||||
#![deny(clippy::dbg_macro)]
|
|
||||||
|
|
||||||
use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};
|
use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};
|
||||||
|
|
||||||
use axum::{
|
use axum::{
|
||||||
|
@ -54,6 +44,8 @@ static GLOBAL: Jemalloc = Jemalloc;
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
|
clap::parse();
|
||||||
|
|
||||||
// Initialize config
|
// Initialize config
|
||||||
let raw_config =
|
let raw_config =
|
||||||
Figment::new()
|
Figment::new()
|
||||||
|
@ -75,8 +67,6 @@ async fn main() {
|
||||||
|
|
||||||
config.warn_deprecated();
|
config.warn_deprecated();
|
||||||
|
|
||||||
let log = format!("{},ruma_state_res=error,_=off,sled=off", config.log);
|
|
||||||
|
|
||||||
if config.allow_jaeger {
|
if config.allow_jaeger {
|
||||||
opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
|
opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
|
||||||
let tracer = opentelemetry_jaeger::new_agent_pipeline()
|
let tracer = opentelemetry_jaeger::new_agent_pipeline()
|
||||||
|
@ -86,7 +76,7 @@ async fn main() {
|
||||||
.unwrap();
|
.unwrap();
|
||||||
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
|
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
|
||||||
|
|
||||||
let filter_layer = match EnvFilter::try_new(&log) {
|
let filter_layer = match EnvFilter::try_new(&config.log) {
|
||||||
Ok(s) => s,
|
Ok(s) => s,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
eprintln!(
|
eprintln!(
|
||||||
|
@ -113,7 +103,7 @@ async fn main() {
|
||||||
} else {
|
} else {
|
||||||
let registry = tracing_subscriber::Registry::default();
|
let registry = tracing_subscriber::Registry::default();
|
||||||
let fmt_layer = tracing_subscriber::fmt::Layer::new();
|
let fmt_layer = tracing_subscriber::fmt::Layer::new();
|
||||||
let filter_layer = match EnvFilter::try_new(&log) {
|
let filter_layer = match EnvFilter::try_new(&config.log) {
|
||||||
Ok(s) => s,
|
Ok(s) => s,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}");
|
eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}");
|
||||||
|
@ -238,7 +228,7 @@ async fn spawn_task<B: Send + 'static>(
|
||||||
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
|
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn unrecognized_method<B>(
|
async fn unrecognized_method<B: Send>(
|
||||||
req: axum::http::Request<B>,
|
req: axum::http::Request<B>,
|
||||||
next: axum::middleware::Next<B>,
|
next: axum::middleware::Next<B>,
|
||||||
) -> std::result::Result<axum::response::Response, StatusCode> {
|
) -> std::result::Result<axum::response::Response, StatusCode> {
|
||||||
|
|
|
@ -1,13 +1,14 @@
|
||||||
use std::{
|
use std::{
|
||||||
collections::BTreeMap,
|
collections::BTreeMap,
|
||||||
convert::{TryFrom, TryInto},
|
convert::{TryFrom, TryInto},
|
||||||
sync::{Arc, RwLock},
|
sync::Arc,
|
||||||
time::Instant,
|
time::Instant,
|
||||||
};
|
};
|
||||||
|
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use regex::Regex;
|
use regex::Regex;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
|
api::appservice::Registration,
|
||||||
events::{
|
events::{
|
||||||
room::{
|
room::{
|
||||||
canonical_alias::RoomCanonicalAliasEventContent,
|
canonical_alias::RoomCanonicalAliasEventContent,
|
||||||
|
@ -23,10 +24,10 @@ use ruma::{
|
||||||
},
|
},
|
||||||
TimelineEventType,
|
TimelineEventType,
|
||||||
},
|
},
|
||||||
EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
|
EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
|
||||||
};
|
};
|
||||||
use serde_json::value::to_raw_value;
|
use serde_json::value::to_raw_value;
|
||||||
use tokio::sync::{mpsc, Mutex, MutexGuard};
|
use tokio::sync::{mpsc, Mutex, RwLock};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH},
|
api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH},
|
||||||
|
@ -50,7 +51,7 @@ enum AdminCommand {
|
||||||
/// Registering a new bridge using the ID of an existing bridge will replace
|
/// Registering a new bridge using the ID of an existing bridge will replace
|
||||||
/// the old one.
|
/// the old one.
|
||||||
///
|
///
|
||||||
/// [commandbody]
|
/// [commandbody]()
|
||||||
/// # ```
|
/// # ```
|
||||||
/// # yaml content here
|
/// # yaml content here
|
||||||
/// # ```
|
/// # ```
|
||||||
|
@ -96,7 +97,7 @@ enum AdminCommand {
|
||||||
/// Removing a mass amount of users from a room may cause a significant amount of leave events.
|
/// Removing a mass amount of users from a room may cause a significant amount of leave events.
|
||||||
/// The time to leave rooms may depend significantly on joined rooms and servers.
|
/// The time to leave rooms may depend significantly on joined rooms and servers.
|
||||||
///
|
///
|
||||||
/// [commandbody]
|
/// [commandbody]()
|
||||||
/// # ```
|
/// # ```
|
||||||
/// # User list here
|
/// # User list here
|
||||||
/// # ```
|
/// # ```
|
||||||
|
@ -121,7 +122,7 @@ enum AdminCommand {
|
||||||
/// The PDU event is only checked for validity and is not added to the
|
/// The PDU event is only checked for validity and is not added to the
|
||||||
/// database.
|
/// database.
|
||||||
///
|
///
|
||||||
/// [commandbody]
|
/// [commandbody]()
|
||||||
/// # ```
|
/// # ```
|
||||||
/// # PDU json content here
|
/// # PDU json content here
|
||||||
/// # ```
|
/// # ```
|
||||||
|
@ -165,14 +166,14 @@ enum AdminCommand {
|
||||||
EnableRoom { room_id: Box<RoomId> },
|
EnableRoom { room_id: Box<RoomId> },
|
||||||
|
|
||||||
/// Verify json signatures
|
/// Verify json signatures
|
||||||
/// [commandbody]
|
/// [commandbody]()
|
||||||
/// # ```
|
/// # ```
|
||||||
/// # json here
|
/// # json here
|
||||||
/// # ```
|
/// # ```
|
||||||
SignJson,
|
SignJson,
|
||||||
|
|
||||||
/// Verify json signatures
|
/// Verify json signatures
|
||||||
/// [commandbody]
|
/// [commandbody]()
|
||||||
/// # ```
|
/// # ```
|
||||||
/// # json here
|
/// # json here
|
||||||
/// # ```
|
/// # ```
|
||||||
|
@ -214,60 +215,44 @@ impl Service {
|
||||||
let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
|
let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
|
||||||
.expect("@conduit:server_name is valid");
|
.expect("@conduit:server_name is valid");
|
||||||
|
|
||||||
let conduit_room = services()
|
if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
|
||||||
.rooms
|
loop {
|
||||||
.alias
|
tokio::select! {
|
||||||
.resolve_local_alias(
|
Some(event) = receiver.recv() => {
|
||||||
format!("#admins:{}", services().globals.server_name())
|
let message_content = match event {
|
||||||
.as_str()
|
AdminRoomEvent::SendMessage(content) => content,
|
||||||
.try_into()
|
AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await
|
||||||
.expect("#admins:server_name is a valid room alias"),
|
};
|
||||||
)
|
|
||||||
.expect("Database data for admin room alias must be valid")
|
|
||||||
.expect("Admin room must exist");
|
|
||||||
|
|
||||||
let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| {
|
let mutex_state = Arc::clone(
|
||||||
services()
|
services().globals
|
||||||
.rooms
|
.roomid_mutex_state
|
||||||
.timeline
|
.write()
|
||||||
.build_and_append_pdu(
|
.await
|
||||||
PduBuilder {
|
.entry(conduit_room.to_owned())
|
||||||
event_type: TimelineEventType::RoomMessage,
|
.or_default(),
|
||||||
content: to_raw_value(&message)
|
);
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: None,
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&conduit_room,
|
|
||||||
mutex_lock,
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
};
|
|
||||||
|
|
||||||
loop {
|
let state_lock = mutex_state.lock().await;
|
||||||
tokio::select! {
|
|
||||||
Some(event) = receiver.recv() => {
|
|
||||||
let message_content = match event {
|
|
||||||
AdminRoomEvent::SendMessage(content) => content,
|
|
||||||
AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await
|
|
||||||
};
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
services()
|
||||||
services().globals
|
.rooms
|
||||||
.roomid_mutex_state
|
.timeline
|
||||||
.write()
|
.build_and_append_pdu(
|
||||||
.unwrap()
|
PduBuilder {
|
||||||
.entry(conduit_room.to_owned())
|
event_type: TimelineEventType::RoomMessage,
|
||||||
.or_default(),
|
content: to_raw_value(&message_content)
|
||||||
);
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
let state_lock = mutex_state.lock().await;
|
state_key: None,
|
||||||
|
redacts: None,
|
||||||
send_message(message_content, &state_lock);
|
},
|
||||||
|
&conduit_user,
|
||||||
drop(state_lock);
|
&conduit_room,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await.unwrap();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -351,10 +336,9 @@ impl Service {
|
||||||
if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
|
if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
|
||||||
{
|
{
|
||||||
let appservice_config = body[1..body.len() - 1].join("\n");
|
let appservice_config = body[1..body.len() - 1].join("\n");
|
||||||
let parsed_config =
|
let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config);
|
||||||
serde_yaml::from_str::<serde_yaml::Value>(&appservice_config);
|
|
||||||
match parsed_config {
|
match parsed_config {
|
||||||
Ok(yaml) => match services().appservice.register_appservice(yaml) {
|
Ok(yaml) => match services().appservice.register_appservice(yaml).await {
|
||||||
Ok(id) => RoomMessageEventContent::text_plain(format!(
|
Ok(id) => RoomMessageEventContent::text_plain(format!(
|
||||||
"Appservice registered with ID: {id}."
|
"Appservice registered with ID: {id}."
|
||||||
)),
|
)),
|
||||||
|
@ -377,6 +361,7 @@ impl Service {
|
||||||
} => match services()
|
} => match services()
|
||||||
.appservice
|
.appservice
|
||||||
.unregister_appservice(&appservice_identifier)
|
.unregister_appservice(&appservice_identifier)
|
||||||
|
.await
|
||||||
{
|
{
|
||||||
Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."),
|
Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."),
|
||||||
Err(e) => RoomMessageEventContent::text_plain(format!(
|
Err(e) => RoomMessageEventContent::text_plain(format!(
|
||||||
|
@ -384,25 +369,13 @@ impl Service {
|
||||||
)),
|
)),
|
||||||
},
|
},
|
||||||
AdminCommand::ListAppservices => {
|
AdminCommand::ListAppservices => {
|
||||||
if let Ok(appservices) = services()
|
let appservices = services().appservice.iter_ids().await;
|
||||||
.appservice
|
let output = format!(
|
||||||
.iter_ids()
|
"Appservices ({}): {}",
|
||||||
.map(|ids| ids.collect::<Vec<_>>())
|
appservices.len(),
|
||||||
{
|
appservices.join(", ")
|
||||||
let count = appservices.len();
|
);
|
||||||
let output = format!(
|
RoomMessageEventContent::text_plain(output)
|
||||||
"Appservices ({}): {}",
|
|
||||||
count,
|
|
||||||
appservices
|
|
||||||
.into_iter()
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
.join(", ")
|
|
||||||
);
|
|
||||||
RoomMessageEventContent::text_plain(output)
|
|
||||||
} else {
|
|
||||||
RoomMessageEventContent::text_plain("Failed to get appservices.")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
AdminCommand::ListRooms => {
|
AdminCommand::ListRooms => {
|
||||||
let room_ids = services().rooms.metadata.iter_ids();
|
let room_ids = services().rooms.metadata.iter_ids();
|
||||||
|
@ -434,11 +407,7 @@ impl Service {
|
||||||
Err(e) => RoomMessageEventContent::text_plain(e.to_string()),
|
Err(e) => RoomMessageEventContent::text_plain(e.to_string()),
|
||||||
},
|
},
|
||||||
AdminCommand::IncomingFederation => {
|
AdminCommand::IncomingFederation => {
|
||||||
let map = services()
|
let map = services().globals.roomid_federationhandletime.read().await;
|
||||||
.globals
|
|
||||||
.roomid_federationhandletime
|
|
||||||
.read()
|
|
||||||
.unwrap();
|
|
||||||
let mut msg: String = format!("Handling {} incoming pdus:\n", map.len());
|
let mut msg: String = format!("Handling {} incoming pdus:\n", map.len());
|
||||||
|
|
||||||
for (r, (e, i)) in map.iter() {
|
for (r, (e, i)) in map.iter() {
|
||||||
|
@ -552,7 +521,7 @@ impl Service {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
AdminCommand::MemoryUsage => {
|
AdminCommand::MemoryUsage => {
|
||||||
let response1 = services().memory_usage();
|
let response1 = services().memory_usage().await;
|
||||||
let response2 = services().globals.db.memory_usage();
|
let response2 = services().globals.db.memory_usage();
|
||||||
|
|
||||||
RoomMessageEventContent::text_plain(format!(
|
RoomMessageEventContent::text_plain(format!(
|
||||||
|
@ -565,7 +534,7 @@ impl Service {
|
||||||
RoomMessageEventContent::text_plain("Done.")
|
RoomMessageEventContent::text_plain("Done.")
|
||||||
}
|
}
|
||||||
AdminCommand::ClearServiceCaches { amount } => {
|
AdminCommand::ClearServiceCaches { amount } => {
|
||||||
services().clear_caches(amount);
|
services().clear_caches(amount).await;
|
||||||
|
|
||||||
RoomMessageEventContent::text_plain("Done.")
|
RoomMessageEventContent::text_plain("Done.")
|
||||||
}
|
}
|
||||||
|
@ -586,6 +555,13 @@ impl Service {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Checks if user is local
|
||||||
|
if user_id.server_name() != services().globals.server_name() {
|
||||||
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"The specified user is not from this server!",
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
// Check if the specified user is valid
|
// Check if the specified user is valid
|
||||||
if !services().users.exists(&user_id)?
|
if !services().users.exists(&user_id)?
|
||||||
|| user_id
|
|| user_id
|
||||||
|
@ -689,7 +665,15 @@ impl Service {
|
||||||
user_id,
|
user_id,
|
||||||
} => {
|
} => {
|
||||||
let user_id = Arc::<UserId>::from(user_id);
|
let user_id = Arc::<UserId>::from(user_id);
|
||||||
if services().users.exists(&user_id)? {
|
if !services().users.exists(&user_id)? {
|
||||||
|
RoomMessageEventContent::text_plain(format!(
|
||||||
|
"User {user_id} doesn't exist on this server"
|
||||||
|
))
|
||||||
|
} else if user_id.server_name() != services().globals.server_name() {
|
||||||
|
RoomMessageEventContent::text_plain(format!(
|
||||||
|
"User {user_id} is not from this server"
|
||||||
|
))
|
||||||
|
} else {
|
||||||
RoomMessageEventContent::text_plain(format!(
|
RoomMessageEventContent::text_plain(format!(
|
||||||
"Making {user_id} leave all rooms before deactivation..."
|
"Making {user_id} leave all rooms before deactivation..."
|
||||||
));
|
));
|
||||||
|
@ -703,30 +687,76 @@ impl Service {
|
||||||
RoomMessageEventContent::text_plain(format!(
|
RoomMessageEventContent::text_plain(format!(
|
||||||
"User {user_id} has been deactivated"
|
"User {user_id} has been deactivated"
|
||||||
))
|
))
|
||||||
} else {
|
|
||||||
RoomMessageEventContent::text_plain(format!(
|
|
||||||
"User {user_id} doesn't exist on this server"
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
AdminCommand::DeactivateAll { leave_rooms, force } => {
|
AdminCommand::DeactivateAll { leave_rooms, force } => {
|
||||||
if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
|
if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
|
||||||
{
|
{
|
||||||
let usernames = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
|
let users = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
|
||||||
|
|
||||||
let mut user_ids: Vec<&UserId> = Vec::new();
|
let mut user_ids = Vec::new();
|
||||||
|
let mut remote_ids = Vec::new();
|
||||||
|
let mut non_existant_ids = Vec::new();
|
||||||
|
let mut invalid_users = Vec::new();
|
||||||
|
|
||||||
for &username in &usernames {
|
for &user in &users {
|
||||||
match <&UserId>::try_from(username) {
|
match <&UserId>::try_from(user) {
|
||||||
Ok(user_id) => user_ids.push(user_id),
|
Ok(user_id) => {
|
||||||
|
if user_id.server_name() != services().globals.server_name() {
|
||||||
|
remote_ids.push(user_id)
|
||||||
|
} else if !services().users.exists(user_id)? {
|
||||||
|
non_existant_ids.push(user_id)
|
||||||
|
} else {
|
||||||
|
user_ids.push(user_id)
|
||||||
|
}
|
||||||
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
return Ok(RoomMessageEventContent::text_plain(format!(
|
invalid_users.push(user);
|
||||||
"{username} is not a valid username"
|
|
||||||
)))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let mut markdown_message = String::new();
|
||||||
|
let mut html_message = String::new();
|
||||||
|
if !invalid_users.is_empty() {
|
||||||
|
markdown_message.push_str("The following user ids are not valid:\n```\n");
|
||||||
|
html_message.push_str("The following user ids are not valid:\n<pre>\n");
|
||||||
|
for invalid_user in invalid_users {
|
||||||
|
markdown_message.push_str(&format!("{invalid_user}\n"));
|
||||||
|
html_message.push_str(&format!("{invalid_user}\n"));
|
||||||
|
}
|
||||||
|
markdown_message.push_str("```\n\n");
|
||||||
|
html_message.push_str("</pre>\n\n");
|
||||||
|
}
|
||||||
|
if !remote_ids.is_empty() {
|
||||||
|
markdown_message
|
||||||
|
.push_str("The following users are not from this server:\n```\n");
|
||||||
|
html_message
|
||||||
|
.push_str("The following users are not from this server:\n<pre>\n");
|
||||||
|
for remote_id in remote_ids {
|
||||||
|
markdown_message.push_str(&format!("{remote_id}\n"));
|
||||||
|
html_message.push_str(&format!("{remote_id}\n"));
|
||||||
|
}
|
||||||
|
markdown_message.push_str("```\n\n");
|
||||||
|
html_message.push_str("</pre>\n\n");
|
||||||
|
}
|
||||||
|
if !non_existant_ids.is_empty() {
|
||||||
|
markdown_message.push_str("The following users do not exist:\n```\n");
|
||||||
|
html_message.push_str("The following users do not exist:\n<pre>\n");
|
||||||
|
for non_existant_id in non_existant_ids {
|
||||||
|
markdown_message.push_str(&format!("{non_existant_id}\n"));
|
||||||
|
html_message.push_str(&format!("{non_existant_id}\n"));
|
||||||
|
}
|
||||||
|
markdown_message.push_str("```\n\n");
|
||||||
|
html_message.push_str("</pre>\n\n");
|
||||||
|
}
|
||||||
|
if !markdown_message.is_empty() {
|
||||||
|
return Ok(RoomMessageEventContent::text_html(
|
||||||
|
markdown_message,
|
||||||
|
html_message,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
let mut deactivation_count = 0;
|
let mut deactivation_count = 0;
|
||||||
let mut admins = Vec::new();
|
let mut admins = Vec::new();
|
||||||
|
|
||||||
|
@ -806,7 +836,7 @@ impl Service {
|
||||||
.fetch_required_signing_keys(&value, &pub_key_map)
|
.fetch_required_signing_keys(&value, &pub_key_map)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let pub_key_map = pub_key_map.read().unwrap();
|
let pub_key_map = pub_key_map.read().await;
|
||||||
match ruma::signatures::verify_json(&pub_key_map, &value) {
|
match ruma::signatures::verify_json(&pub_key_map, &value) {
|
||||||
Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
|
Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
|
||||||
Err(e) => RoomMessageEventContent::text_plain(format!(
|
Err(e) => RoomMessageEventContent::text_plain(format!(
|
||||||
|
@ -858,12 +888,15 @@ impl Service {
|
||||||
.expect("Regex compilation should not fail");
|
.expect("Regex compilation should not fail");
|
||||||
let text = re.replace_all(&text, "<code>$1</code>: $4");
|
let text = re.replace_all(&text, "<code>$1</code>: $4");
|
||||||
|
|
||||||
// Look for a `[commandbody]` tag. If it exists, use all lines below it that
|
// Look for a `[commandbody]()` tag. If it exists, use all lines below it that
|
||||||
// start with a `#` in the USAGE section.
|
// start with a `#` in the USAGE section.
|
||||||
let mut text_lines: Vec<&str> = text.lines().collect();
|
let mut text_lines: Vec<&str> = text.lines().collect();
|
||||||
let mut command_body = String::new();
|
let mut command_body = String::new();
|
||||||
|
|
||||||
if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") {
|
if let Some(line_index) = text_lines
|
||||||
|
.iter()
|
||||||
|
.position(|line| *line == "[commandbody]()")
|
||||||
|
{
|
||||||
text_lines.remove(line_index);
|
text_lines.remove(line_index);
|
||||||
|
|
||||||
while text_lines
|
while text_lines
|
||||||
|
@ -919,7 +952,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.clone())
|
.entry(room_id.clone())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -952,170 +985,223 @@ impl Service {
|
||||||
content.room_version = room_version;
|
content.room_version = room_version;
|
||||||
|
|
||||||
// 1. The room create event
|
// 1. The room create event
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomCreate,
|
.timeline
|
||||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
.build_and_append_pdu(
|
||||||
unsigned: None,
|
PduBuilder {
|
||||||
state_key: Some("".to_owned()),
|
event_type: TimelineEventType::RoomCreate,
|
||||||
redacts: None,
|
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
&conduit_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 2. Make conduit bot join
|
// 2. Make conduit bot join
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomMember,
|
.timeline
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
.build_and_append_pdu(
|
||||||
membership: MembershipState::Join,
|
PduBuilder {
|
||||||
displayname: None,
|
event_type: TimelineEventType::RoomMember,
|
||||||
avatar_url: None,
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
is_direct: None,
|
membership: MembershipState::Join,
|
||||||
third_party_invite: None,
|
displayname: None,
|
||||||
blurhash: None,
|
avatar_url: None,
|
||||||
reason: None,
|
is_direct: None,
|
||||||
join_authorized_via_users_server: None,
|
third_party_invite: None,
|
||||||
})
|
blurhash: None,
|
||||||
.expect("event is valid, we just created it"),
|
reason: None,
|
||||||
unsigned: None,
|
join_authorized_via_users_server: None,
|
||||||
state_key: Some(conduit_user.to_string()),
|
})
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
&conduit_user,
|
state_key: Some(conduit_user.to_string()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 3. Power levels
|
// 3. Power levels
|
||||||
let mut users = BTreeMap::new();
|
let mut users = BTreeMap::new();
|
||||||
users.insert(conduit_user.clone(), 100.into());
|
users.insert(conduit_user.clone(), 100.into());
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomPowerLevels,
|
.timeline
|
||||||
content: to_raw_value(&RoomPowerLevelsEventContent {
|
.build_and_append_pdu(
|
||||||
users,
|
PduBuilder {
|
||||||
..Default::default()
|
event_type: TimelineEventType::RoomPowerLevels,
|
||||||
})
|
content: to_raw_value(&RoomPowerLevelsEventContent {
|
||||||
.expect("event is valid, we just created it"),
|
users,
|
||||||
unsigned: None,
|
..Default::default()
|
||||||
state_key: Some("".to_owned()),
|
})
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
&conduit_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 4.1 Join Rules
|
// 4.1 Join Rules
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomJoinRules,
|
.timeline
|
||||||
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
|
.build_and_append_pdu(
|
||||||
.expect("event is valid, we just created it"),
|
PduBuilder {
|
||||||
unsigned: None,
|
event_type: TimelineEventType::RoomJoinRules,
|
||||||
state_key: Some("".to_owned()),
|
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
&conduit_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 4.2 History Visibility
|
// 4.2 History Visibility
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomHistoryVisibility,
|
.timeline
|
||||||
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
.build_and_append_pdu(
|
||||||
HistoryVisibility::Shared,
|
PduBuilder {
|
||||||
))
|
event_type: TimelineEventType::RoomHistoryVisibility,
|
||||||
.expect("event is valid, we just created it"),
|
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
||||||
unsigned: None,
|
HistoryVisibility::Shared,
|
||||||
state_key: Some("".to_owned()),
|
))
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
&conduit_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 4.3 Guest Access
|
// 4.3 Guest Access
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomGuestAccess,
|
.timeline
|
||||||
content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
|
.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: TimelineEventType::RoomGuestAccess,
|
||||||
|
content: to_raw_value(&RoomGuestAccessEventContent::new(
|
||||||
|
GuestAccess::Forbidden,
|
||||||
|
))
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
redacts: None,
|
redacts: None,
|
||||||
},
|
},
|
||||||
&conduit_user,
|
&conduit_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 5. Events implied by name and topic
|
// 5. Events implied by name and topic
|
||||||
let room_name = format!("{} Admin Room", services().globals.server_name());
|
let room_name = format!("{} Admin Room", services().globals.server_name());
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomName,
|
.timeline
|
||||||
content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
|
.build_and_append_pdu(
|
||||||
.expect("event is valid, we just created it"),
|
PduBuilder {
|
||||||
unsigned: None,
|
event_type: TimelineEventType::RoomName,
|
||||||
state_key: Some("".to_owned()),
|
content: to_raw_value(&RoomNameEventContent::new(room_name))
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
&conduit_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomTopic,
|
.timeline
|
||||||
content: to_raw_value(&RoomTopicEventContent {
|
.build_and_append_pdu(
|
||||||
topic: format!("Manage {}", services().globals.server_name()),
|
PduBuilder {
|
||||||
})
|
event_type: TimelineEventType::RoomTopic,
|
||||||
.expect("event is valid, we just created it"),
|
content: to_raw_value(&RoomTopicEventContent {
|
||||||
unsigned: None,
|
topic: format!("Manage {}", services().globals.server_name()),
|
||||||
state_key: Some("".to_owned()),
|
})
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
&conduit_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// 6. Room alias
|
// 6. Room alias
|
||||||
let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
|
let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
|
||||||
.try_into()
|
.try_into()
|
||||||
.expect("#admins:server_name is a valid alias name");
|
.expect("#admins:server_name is a valid alias name");
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
services()
|
||||||
PduBuilder {
|
.rooms
|
||||||
event_type: TimelineEventType::RoomCanonicalAlias,
|
.timeline
|
||||||
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
.build_and_append_pdu(
|
||||||
alias: Some(alias.clone()),
|
PduBuilder {
|
||||||
alt_aliases: Vec::new(),
|
event_type: TimelineEventType::RoomCanonicalAlias,
|
||||||
})
|
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
||||||
.expect("event is valid, we just created it"),
|
alias: Some(alias.clone()),
|
||||||
unsigned: None,
|
alt_aliases: Vec::new(),
|
||||||
state_key: Some("".to_owned()),
|
})
|
||||||
redacts: None,
|
.expect("event is valid, we just created it"),
|
||||||
},
|
unsigned: None,
|
||||||
&conduit_user,
|
state_key: Some("".to_owned()),
|
||||||
&room_id,
|
redacts: None,
|
||||||
&state_lock,
|
},
|
||||||
)?;
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
services().rooms.alias.set_alias(&alias, &room_id)?;
|
services().rooms.alias.set_alias(&alias, &room_id)?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Gets the room ID of the admin room
|
||||||
|
///
|
||||||
|
/// Errors are propagated from the database, and will have None if there is no admin room
|
||||||
|
pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
|
||||||
|
let admin_room_alias: Box<RoomAliasId> =
|
||||||
|
format!("#admins:{}", services().globals.server_name())
|
||||||
|
.try_into()
|
||||||
|
.expect("#admins:server_name is a valid alias name");
|
||||||
|
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.alias
|
||||||
|
.resolve_local_alias(&admin_room_alias)
|
||||||
|
}
|
||||||
|
|
||||||
/// Invite the user to the conduit admin room.
|
/// Invite the user to the conduit admin room.
|
||||||
///
|
///
|
||||||
/// In conduit, this is equivalent to granting admin privileges.
|
/// In conduit, this is equivalent to granting admin privileges.
|
||||||
|
@ -1124,102 +1210,105 @@ impl Service {
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
displayname: String,
|
displayname: String,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let admin_room_alias: Box<RoomAliasId> =
|
if let Some(room_id) = services().admin.get_admin_room()? {
|
||||||
format!("#admins:{}", services().globals.server_name())
|
let mutex_state = Arc::clone(
|
||||||
.try_into()
|
services()
|
||||||
.expect("#admins:server_name is a valid alias name");
|
.globals
|
||||||
let room_id = services()
|
.roomid_mutex_state
|
||||||
.rooms
|
.write()
|
||||||
.alias
|
.await
|
||||||
.resolve_local_alias(&admin_room_alias)?
|
.entry(room_id.clone())
|
||||||
.expect("Admin room must exist");
|
.or_default(),
|
||||||
|
);
|
||||||
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
// Use the server user to grant the new admin's power level
|
||||||
|
let conduit_user =
|
||||||
|
UserId::parse_with_server_name("conduit", services().globals.server_name())
|
||||||
|
.expect("@conduit:server_name is valid");
|
||||||
|
|
||||||
|
// Invite and join the real user
|
||||||
services()
|
services()
|
||||||
.globals
|
.rooms
|
||||||
.roomid_mutex_state
|
.timeline
|
||||||
.write()
|
.build_and_append_pdu(
|
||||||
.unwrap()
|
PduBuilder {
|
||||||
.entry(room_id.clone())
|
event_type: TimelineEventType::RoomMember,
|
||||||
.or_default(),
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
);
|
membership: MembershipState::Invite,
|
||||||
let state_lock = mutex_state.lock().await;
|
displayname: None,
|
||||||
|
avatar_url: None,
|
||||||
|
is_direct: None,
|
||||||
|
third_party_invite: None,
|
||||||
|
blurhash: None,
|
||||||
|
reason: None,
|
||||||
|
join_authorized_via_users_server: None,
|
||||||
|
})
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(user_id.to_string()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: TimelineEventType::RoomMember,
|
||||||
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
|
membership: MembershipState::Join,
|
||||||
|
displayname: Some(displayname),
|
||||||
|
avatar_url: None,
|
||||||
|
is_direct: None,
|
||||||
|
third_party_invite: None,
|
||||||
|
blurhash: None,
|
||||||
|
reason: None,
|
||||||
|
join_authorized_via_users_server: None,
|
||||||
|
})
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(user_id.to_string()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
user_id,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// Use the server user to grant the new admin's power level
|
// Set power level
|
||||||
let conduit_user =
|
let mut users = BTreeMap::new();
|
||||||
UserId::parse_with_server_name("conduit", services().globals.server_name())
|
users.insert(conduit_user.to_owned(), 100.into());
|
||||||
.expect("@conduit:server_name is valid");
|
users.insert(user_id.to_owned(), 100.into());
|
||||||
|
|
||||||
// Invite and join the real user
|
services()
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
.rooms
|
||||||
PduBuilder {
|
.timeline
|
||||||
event_type: TimelineEventType::RoomMember,
|
.build_and_append_pdu(
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
PduBuilder {
|
||||||
membership: MembershipState::Invite,
|
event_type: TimelineEventType::RoomPowerLevels,
|
||||||
displayname: None,
|
content: to_raw_value(&RoomPowerLevelsEventContent {
|
||||||
avatar_url: None,
|
users,
|
||||||
is_direct: None,
|
..Default::default()
|
||||||
third_party_invite: None,
|
})
|
||||||
blurhash: None,
|
.expect("event is valid, we just created it"),
|
||||||
reason: None,
|
unsigned: None,
|
||||||
join_authorized_via_users_server: None,
|
state_key: Some("".to_owned()),
|
||||||
})
|
redacts: None,
|
||||||
.expect("event is valid, we just created it"),
|
},
|
||||||
unsigned: None,
|
&conduit_user,
|
||||||
state_key: Some(user_id.to_string()),
|
&room_id,
|
||||||
redacts: None,
|
&state_lock,
|
||||||
},
|
)
|
||||||
&conduit_user,
|
.await?;
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomMember,
|
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Join,
|
|
||||||
displayname: Some(displayname),
|
|
||||||
avatar_url: None,
|
|
||||||
is_direct: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
blurhash: None,
|
|
||||||
reason: None,
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(user_id.to_string()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
user_id,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Set power level
|
// Send welcome message
|
||||||
let mut users = BTreeMap::new();
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
users.insert(conduit_user.to_owned(), 100.into());
|
|
||||||
users.insert(user_id.to_owned(), 100.into());
|
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomPowerLevels,
|
|
||||||
content: to_raw_value(&RoomPowerLevelsEventContent {
|
|
||||||
users,
|
|
||||||
..Default::default()
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Send welcome message
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomMessage,
|
event_type: TimelineEventType::RoomMessage,
|
||||||
content: to_raw_value(&RoomMessageEventContent::text_html(
|
content: to_raw_value(&RoomMessageEventContent::text_html(
|
||||||
|
@ -1234,8 +1323,8 @@ impl Service {
|
||||||
&conduit_user,
|
&conduit_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
).await?;
|
||||||
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,8 +1,10 @@
|
||||||
|
use ruma::api::appservice::Registration;
|
||||||
|
|
||||||
use crate::Result;
|
use crate::Result;
|
||||||
|
|
||||||
pub trait Data: Send + Sync {
|
pub trait Data: Send + Sync {
|
||||||
/// Registers an appservice and returns the ID to the caller
|
/// Registers an appservice and returns the ID to the caller
|
||||||
fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String>;
|
fn register_appservice(&self, yaml: Registration) -> Result<String>;
|
||||||
|
|
||||||
/// Remove an appservice registration
|
/// Remove an appservice registration
|
||||||
///
|
///
|
||||||
|
@ -11,9 +13,9 @@ pub trait Data: Send + Sync {
|
||||||
/// * `service_name` - the name you send to register the service previously
|
/// * `service_name` - the name you send to register the service previously
|
||||||
fn unregister_appservice(&self, service_name: &str) -> Result<()>;
|
fn unregister_appservice(&self, service_name: &str) -> Result<()>;
|
||||||
|
|
||||||
fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>>;
|
fn get_registration(&self, id: &str) -> Result<Option<Registration>>;
|
||||||
|
|
||||||
fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>>;
|
fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>>;
|
||||||
|
|
||||||
fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>>;
|
fn all(&self) -> Result<Vec<(String, Registration)>>;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,37 +1,184 @@
|
||||||
mod data;
|
mod data;
|
||||||
|
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
pub use data::Data;
|
pub use data::Data;
|
||||||
|
|
||||||
use crate::Result;
|
use futures_util::Future;
|
||||||
|
use regex::RegexSet;
|
||||||
|
use ruma::api::appservice::{Namespace, Registration};
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
|
use crate::{services, Result};
|
||||||
|
|
||||||
|
/// Compiled regular expressions for a namespace.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct NamespaceRegex {
|
||||||
|
pub exclusive: Option<RegexSet>,
|
||||||
|
pub non_exclusive: Option<RegexSet>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NamespaceRegex {
|
||||||
|
/// Checks if this namespace has rights to a namespace
|
||||||
|
pub fn is_match(&self, heystack: &str) -> bool {
|
||||||
|
if self.is_exclusive_match(heystack) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(non_exclusive) = &self.non_exclusive {
|
||||||
|
if non_exclusive.is_match(heystack) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks if this namespace has exlusive rights to a namespace
|
||||||
|
pub fn is_exclusive_match(&self, heystack: &str) -> bool {
|
||||||
|
if let Some(exclusive) = &self.exclusive {
|
||||||
|
if exclusive.is_match(heystack) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<Vec<Namespace>> for NamespaceRegex {
|
||||||
|
fn try_from(value: Vec<Namespace>) -> Result<Self, regex::Error> {
|
||||||
|
let mut exclusive = vec![];
|
||||||
|
let mut non_exclusive = vec![];
|
||||||
|
|
||||||
|
for namespace in value {
|
||||||
|
if namespace.exclusive {
|
||||||
|
exclusive.push(namespace.regex);
|
||||||
|
} else {
|
||||||
|
non_exclusive.push(namespace.regex);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(NamespaceRegex {
|
||||||
|
exclusive: if exclusive.is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(RegexSet::new(exclusive)?)
|
||||||
|
},
|
||||||
|
non_exclusive: if non_exclusive.is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(RegexSet::new(non_exclusive)?)
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type Error = regex::Error;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Appservice registration combined with its compiled regular expressions.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct RegistrationInfo {
|
||||||
|
pub registration: Registration,
|
||||||
|
pub users: NamespaceRegex,
|
||||||
|
pub aliases: NamespaceRegex,
|
||||||
|
pub rooms: NamespaceRegex,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<Registration> for RegistrationInfo {
|
||||||
|
fn try_from(value: Registration) -> Result<RegistrationInfo, regex::Error> {
|
||||||
|
Ok(RegistrationInfo {
|
||||||
|
users: value.namespaces.users.clone().try_into()?,
|
||||||
|
aliases: value.namespaces.aliases.clone().try_into()?,
|
||||||
|
rooms: value.namespaces.rooms.clone().try_into()?,
|
||||||
|
registration: value,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type Error = regex::Error;
|
||||||
|
}
|
||||||
|
|
||||||
pub struct Service {
|
pub struct Service {
|
||||||
pub db: &'static dyn Data,
|
pub db: &'static dyn Data,
|
||||||
|
registration_info: RwLock<BTreeMap<String, RegistrationInfo>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Service {
|
impl Service {
|
||||||
/// Registers an appservice and returns the ID to the caller
|
pub fn build(db: &'static dyn Data) -> Result<Self> {
|
||||||
pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
|
let mut registration_info = BTreeMap::new();
|
||||||
|
// Inserting registrations into cache
|
||||||
|
for appservice in db.all()? {
|
||||||
|
registration_info.insert(
|
||||||
|
appservice.0,
|
||||||
|
appservice
|
||||||
|
.1
|
||||||
|
.try_into()
|
||||||
|
.expect("Should be validated on registration"),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
db,
|
||||||
|
registration_info: RwLock::new(registration_info),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
/// Registers an appservice and returns the ID to the caller.
|
||||||
|
pub async fn register_appservice(&self, yaml: Registration) -> Result<String> {
|
||||||
|
services()
|
||||||
|
.appservice
|
||||||
|
.registration_info
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.insert(yaml.id.clone(), yaml.clone().try_into()?);
|
||||||
|
|
||||||
self.db.register_appservice(yaml)
|
self.db.register_appservice(yaml)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Remove an appservice registration
|
/// Removes an appservice registration.
|
||||||
///
|
///
|
||||||
/// # Arguments
|
/// # Arguments
|
||||||
///
|
///
|
||||||
/// * `service_name` - the name you send to register the service previously
|
/// * `service_name` - the name you send to register the service previously
|
||||||
pub fn unregister_appservice(&self, service_name: &str) -> Result<()> {
|
pub async fn unregister_appservice(&self, service_name: &str) -> Result<()> {
|
||||||
|
services()
|
||||||
|
.appservice
|
||||||
|
.registration_info
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.remove(service_name)
|
||||||
|
.ok_or_else(|| crate::Error::AdminCommand("Appservice not found"))?;
|
||||||
|
|
||||||
self.db.unregister_appservice(service_name)
|
self.db.unregister_appservice(service_name)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
|
pub async fn get_registration(&self, id: &str) -> Option<Registration> {
|
||||||
self.db.get_registration(id)
|
self.registration_info
|
||||||
|
.read()
|
||||||
|
.await
|
||||||
|
.get(id)
|
||||||
|
.cloned()
|
||||||
|
.map(|info| info.registration)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + '_> {
|
pub async fn iter_ids(&self) -> Vec<String> {
|
||||||
self.db.iter_ids()
|
self.registration_info
|
||||||
|
.read()
|
||||||
|
.await
|
||||||
|
.keys()
|
||||||
|
.cloned()
|
||||||
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
|
pub async fn find_from_token(&self, token: &str) -> Option<RegistrationInfo> {
|
||||||
self.db.all()
|
self.read()
|
||||||
|
.await
|
||||||
|
.values()
|
||||||
|
.find(|info| info.registration.as_token == token)
|
||||||
|
.cloned()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn read(
|
||||||
|
&self,
|
||||||
|
) -> impl Future<Output = tokio::sync::RwLockReadGuard<'_, BTreeMap<String, RegistrationInfo>>>
|
||||||
|
{
|
||||||
|
self.registration_info.read()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,6 +8,12 @@ use ruma::{
|
||||||
use crate::api::server_server::FedDest;
|
use crate::api::server_server::FedDest;
|
||||||
|
|
||||||
use crate::{services, Config, Error, Result};
|
use crate::{services, Config, Error, Result};
|
||||||
|
use futures_util::FutureExt;
|
||||||
|
use hyper::{
|
||||||
|
client::connect::dns::{GaiResolver, Name},
|
||||||
|
service::Service as HyperService,
|
||||||
|
};
|
||||||
|
use reqwest::dns::{Addrs, Resolve, Resolving};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
client::sync::sync_events,
|
client::sync::sync_events,
|
||||||
|
@ -17,17 +23,19 @@ use ruma::{
|
||||||
};
|
};
|
||||||
use std::{
|
use std::{
|
||||||
collections::{BTreeMap, HashMap},
|
collections::{BTreeMap, HashMap},
|
||||||
|
error::Error as StdError,
|
||||||
fs,
|
fs,
|
||||||
future::Future,
|
future::{self, Future},
|
||||||
|
iter,
|
||||||
net::{IpAddr, SocketAddr},
|
net::{IpAddr, SocketAddr},
|
||||||
path::PathBuf,
|
path::PathBuf,
|
||||||
sync::{
|
sync::{
|
||||||
atomic::{self, AtomicBool},
|
atomic::{self, AtomicBool},
|
||||||
Arc, Mutex, RwLock,
|
Arc, RwLock as StdRwLock,
|
||||||
},
|
},
|
||||||
time::{Duration, Instant},
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore};
|
use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore};
|
||||||
use tracing::{error, info};
|
use tracing::{error, info};
|
||||||
use trust_dns_resolver::TokioAsyncResolver;
|
use trust_dns_resolver::TokioAsyncResolver;
|
||||||
|
|
||||||
|
@ -45,7 +53,7 @@ pub struct Service {
|
||||||
pub db: &'static dyn Data,
|
pub db: &'static dyn Data,
|
||||||
|
|
||||||
pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
|
pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
|
||||||
pub tls_name_override: Arc<RwLock<TlsNameMap>>,
|
pub tls_name_override: Arc<StdRwLock<TlsNameMap>>,
|
||||||
pub config: Config,
|
pub config: Config,
|
||||||
keypair: Arc<ruma::signatures::Ed25519KeyPair>,
|
keypair: Arc<ruma::signatures::Ed25519KeyPair>,
|
||||||
dns_resolver: TokioAsyncResolver,
|
dns_resolver: TokioAsyncResolver,
|
||||||
|
@ -60,8 +68,8 @@ pub struct Service {
|
||||||
pub servername_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, Arc<Semaphore>>>>,
|
pub servername_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, Arc<Semaphore>>>>,
|
||||||
pub sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
|
pub sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
|
||||||
pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
|
pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
|
||||||
pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>,
|
pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
|
||||||
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>, // this lock will be held longer
|
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
|
||||||
pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
|
pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
|
||||||
pub stateres_mutex: Arc<Mutex<()>>,
|
pub stateres_mutex: Arc<Mutex<()>>,
|
||||||
pub rotate: RotationHandler,
|
pub rotate: RotationHandler,
|
||||||
|
@ -99,6 +107,45 @@ impl Default for RotationHandler {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub struct Resolver {
|
||||||
|
inner: GaiResolver,
|
||||||
|
overrides: Arc<StdRwLock<TlsNameMap>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Resolver {
|
||||||
|
pub fn new(overrides: Arc<StdRwLock<TlsNameMap>>) -> Self {
|
||||||
|
Resolver {
|
||||||
|
inner: GaiResolver::new(),
|
||||||
|
overrides,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Resolve for Resolver {
|
||||||
|
fn resolve(&self, name: Name) -> Resolving {
|
||||||
|
self.overrides
|
||||||
|
.read()
|
||||||
|
.unwrap()
|
||||||
|
.get(name.as_str())
|
||||||
|
.and_then(|(override_name, port)| {
|
||||||
|
override_name.first().map(|first_name| {
|
||||||
|
let x: Box<dyn Iterator<Item = SocketAddr> + Send> =
|
||||||
|
Box::new(iter::once(SocketAddr::new(*first_name, *port)));
|
||||||
|
let x: Resolving = Box::pin(future::ready(Ok(x)));
|
||||||
|
x
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
let this = &mut self.inner.clone();
|
||||||
|
Box::pin(HyperService::<Name>::call(this, name).map(|result| {
|
||||||
|
result
|
||||||
|
.map(|addrs| -> Addrs { Box::new(addrs) })
|
||||||
|
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl Service {
|
impl Service {
|
||||||
pub fn load(db: &'static dyn Data, config: Config) -> Result<Self> {
|
pub fn load(db: &'static dyn Data, config: Config) -> Result<Self> {
|
||||||
let keypair = db.load_keypair();
|
let keypair = db.load_keypair();
|
||||||
|
@ -112,7 +159,7 @@ impl Service {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new()));
|
let tls_name_override = Arc::new(StdRwLock::new(TlsNameMap::new()));
|
||||||
|
|
||||||
let jwt_decoding_key = config
|
let jwt_decoding_key = config
|
||||||
.jwt_secret
|
.jwt_secret
|
||||||
|
@ -120,14 +167,8 @@ impl Service {
|
||||||
.map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()));
|
.map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()));
|
||||||
|
|
||||||
let default_client = reqwest_client_builder(&config)?.build()?;
|
let default_client = reqwest_client_builder(&config)?.build()?;
|
||||||
let name_override = Arc::clone(&tls_name_override);
|
|
||||||
let federation_client = reqwest_client_builder(&config)?
|
let federation_client = reqwest_client_builder(&config)?
|
||||||
.resolve_fn(move |domain| {
|
.dns_resolver(Arc::new(Resolver::new(tls_name_override.clone())))
|
||||||
let read_guard = name_override.read().unwrap();
|
|
||||||
let (override_name, port) = read_guard.get(&domain)?;
|
|
||||||
let first_name = override_name.get(0)?;
|
|
||||||
Some(SocketAddr::new(*first_name, *port))
|
|
||||||
})
|
|
||||||
.build()?;
|
.build()?;
|
||||||
|
|
||||||
// Supported and stable room versions
|
// Supported and stable room versions
|
||||||
|
|
|
@ -1,11 +1,13 @@
|
||||||
use std::{
|
use std::{
|
||||||
collections::{BTreeMap, HashMap},
|
collections::{BTreeMap, HashMap},
|
||||||
sync::{Arc, Mutex},
|
sync::{Arc, Mutex as StdMutex},
|
||||||
};
|
};
|
||||||
|
|
||||||
use lru_cache::LruCache;
|
use lru_cache::LruCache;
|
||||||
|
use tokio::sync::{broadcast, Mutex};
|
||||||
|
|
||||||
use crate::{Config, Result};
|
use crate::{Config, Result};
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
pub mod account_data;
|
pub mod account_data;
|
||||||
pub mod admin;
|
pub mod admin;
|
||||||
|
@ -55,7 +57,7 @@ impl Services {
|
||||||
config: Config,
|
config: Config,
|
||||||
) -> Result<Self> {
|
) -> Result<Self> {
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
appservice: appservice::Service { db },
|
appservice: appservice::Service::build(db)?,
|
||||||
pusher: pusher::Service { db },
|
pusher: pusher::Service { db },
|
||||||
rooms: rooms::Service {
|
rooms: rooms::Service {
|
||||||
alias: rooms::alias::Service { db },
|
alias: rooms::alias::Service { db },
|
||||||
|
@ -64,7 +66,11 @@ impl Services {
|
||||||
edus: rooms::edus::Service {
|
edus: rooms::edus::Service {
|
||||||
presence: rooms::edus::presence::Service { db },
|
presence: rooms::edus::presence::Service { db },
|
||||||
read_receipt: rooms::edus::read_receipt::Service { db },
|
read_receipt: rooms::edus::read_receipt::Service { db },
|
||||||
typing: rooms::edus::typing::Service { db },
|
typing: rooms::edus::typing::Service {
|
||||||
|
typing: RwLock::new(BTreeMap::new()),
|
||||||
|
last_typing_update: RwLock::new(BTreeMap::new()),
|
||||||
|
typing_update_sender: broadcast::channel(100).0,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
event_handler: rooms::event_handler::Service,
|
event_handler: rooms::event_handler::Service,
|
||||||
lazy_loading: rooms::lazy_loading::Service {
|
lazy_loading: rooms::lazy_loading::Service {
|
||||||
|
@ -79,17 +85,17 @@ impl Services {
|
||||||
state: rooms::state::Service { db },
|
state: rooms::state::Service { db },
|
||||||
state_accessor: rooms::state_accessor::Service {
|
state_accessor: rooms::state_accessor::Service {
|
||||||
db,
|
db,
|
||||||
server_visibility_cache: Mutex::new(LruCache::new(
|
server_visibility_cache: StdMutex::new(LruCache::new(
|
||||||
(100.0 * config.conduit_cache_capacity_modifier) as usize,
|
(100.0 * config.conduit_cache_capacity_modifier) as usize,
|
||||||
)),
|
)),
|
||||||
user_visibility_cache: Mutex::new(LruCache::new(
|
user_visibility_cache: StdMutex::new(LruCache::new(
|
||||||
(100.0 * config.conduit_cache_capacity_modifier) as usize,
|
(100.0 * config.conduit_cache_capacity_modifier) as usize,
|
||||||
)),
|
)),
|
||||||
},
|
},
|
||||||
state_cache: rooms::state_cache::Service { db },
|
state_cache: rooms::state_cache::Service { db },
|
||||||
state_compressor: rooms::state_compressor::Service {
|
state_compressor: rooms::state_compressor::Service {
|
||||||
db,
|
db,
|
||||||
stateinfo_cache: Mutex::new(LruCache::new(
|
stateinfo_cache: StdMutex::new(LruCache::new(
|
||||||
(100.0 * config.conduit_cache_capacity_modifier) as usize,
|
(100.0 * config.conduit_cache_capacity_modifier) as usize,
|
||||||
)),
|
)),
|
||||||
},
|
},
|
||||||
|
@ -107,7 +113,7 @@ impl Services {
|
||||||
uiaa: uiaa::Service { db },
|
uiaa: uiaa::Service { db },
|
||||||
users: users::Service {
|
users: users::Service {
|
||||||
db,
|
db,
|
||||||
connections: Mutex::new(BTreeMap::new()),
|
connections: StdMutex::new(BTreeMap::new()),
|
||||||
},
|
},
|
||||||
account_data: account_data::Service { db },
|
account_data: account_data::Service { db },
|
||||||
admin: admin::Service::build(),
|
admin: admin::Service::build(),
|
||||||
|
@ -118,14 +124,8 @@ impl Services {
|
||||||
globals: globals::Service::load(db, config)?,
|
globals: globals::Service::load(db, config)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
fn memory_usage(&self) -> String {
|
async fn memory_usage(&self) -> String {
|
||||||
let lazy_load_waiting = self
|
let lazy_load_waiting = self.rooms.lazy_loading.lazy_load_waiting.lock().await.len();
|
||||||
.rooms
|
|
||||||
.lazy_loading
|
|
||||||
.lazy_load_waiting
|
|
||||||
.lock()
|
|
||||||
.unwrap()
|
|
||||||
.len();
|
|
||||||
let server_visibility_cache = self
|
let server_visibility_cache = self
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
|
@ -152,15 +152,9 @@ impl Services {
|
||||||
.timeline
|
.timeline
|
||||||
.lasttimelinecount_cache
|
.lasttimelinecount_cache
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap()
|
.await
|
||||||
.len();
|
|
||||||
let roomid_spacechunk_cache = self
|
|
||||||
.rooms
|
|
||||||
.spaces
|
|
||||||
.roomid_spacechunk_cache
|
|
||||||
.lock()
|
|
||||||
.unwrap()
|
|
||||||
.len();
|
.len();
|
||||||
|
let roomid_spacechunk_cache = self.rooms.spaces.roomid_spacechunk_cache.lock().await.len();
|
||||||
|
|
||||||
format!(
|
format!(
|
||||||
"\
|
"\
|
||||||
|
@ -173,13 +167,13 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
|
||||||
"
|
"
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
fn clear_caches(&self, amount: u32) {
|
async fn clear_caches(&self, amount: u32) {
|
||||||
if amount > 0 {
|
if amount > 0 {
|
||||||
self.rooms
|
self.rooms
|
||||||
.lazy_loading
|
.lazy_loading
|
||||||
.lazy_load_waiting
|
.lazy_load_waiting
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap()
|
.await
|
||||||
.clear();
|
.clear();
|
||||||
}
|
}
|
||||||
if amount > 1 {
|
if amount > 1 {
|
||||||
|
@ -211,7 +205,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
|
||||||
.timeline
|
.timeline
|
||||||
.lasttimelinecount_cache
|
.lasttimelinecount_cache
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap()
|
.await
|
||||||
.clear();
|
.clear();
|
||||||
}
|
}
|
||||||
if amount > 5 {
|
if amount > 5 {
|
||||||
|
@ -219,7 +213,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
|
||||||
.spaces
|
.spaces
|
||||||
.roomid_spacechunk_cache
|
.roomid_spacechunk_cache
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap()
|
.await
|
||||||
.clear();
|
.clear();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -365,7 +365,7 @@ impl PartialEq for PduEvent {
|
||||||
}
|
}
|
||||||
impl PartialOrd for PduEvent {
|
impl PartialOrd for PduEvent {
|
||||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||||
self.event_id.partial_cmp(&other.event_id)
|
Some(self.cmp(other))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl Ord for PduEvent {
|
impl Ord for PduEvent {
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
mod data;
|
mod data;
|
||||||
pub use data::Data;
|
pub use data::Data;
|
||||||
use ruma::events::AnySyncTimelineEvent;
|
use ruma::{events::AnySyncTimelineEvent, push::PushConditionPowerLevelsCtx};
|
||||||
|
|
||||||
use crate::{services, Error, PduEvent, Result};
|
use crate::{services, Error, PduEvent, Result};
|
||||||
use bytes::BytesMut;
|
use bytes::BytesMut;
|
||||||
|
@ -66,8 +66,7 @@ impl Service {
|
||||||
})?
|
})?
|
||||||
.map(|body| body.freeze());
|
.map(|body| body.freeze());
|
||||||
|
|
||||||
let reqwest_request = reqwest::Request::try_from(http_request)
|
let reqwest_request = reqwest::Request::try_from(http_request)?;
|
||||||
.expect("all http requests are valid reqwest requests");
|
|
||||||
|
|
||||||
// TODO: we could keep this very short and let expo backoff do it's thing...
|
// TODO: we could keep this very short and let expo backoff do it's thing...
|
||||||
//*reqwest_request.timeout_mut() = Some(Duration::from_secs(5));
|
//*reqwest_request.timeout_mut() = Some(Duration::from_secs(5));
|
||||||
|
@ -193,6 +192,12 @@ impl Service {
|
||||||
pdu: &Raw<AnySyncTimelineEvent>,
|
pdu: &Raw<AnySyncTimelineEvent>,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
) -> Result<&'a [Action]> {
|
) -> Result<&'a [Action]> {
|
||||||
|
let power_levels = PushConditionPowerLevelsCtx {
|
||||||
|
users: power_levels.users.clone(),
|
||||||
|
users_default: power_levels.users_default,
|
||||||
|
notifications: power_levels.notifications.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
let ctx = PushConditionRoomCtx {
|
let ctx = PushConditionRoomCtx {
|
||||||
room_id: room_id.to_owned(),
|
room_id: room_id.to_owned(),
|
||||||
member_count: 10_u32.into(), // TODO: get member count efficiently
|
member_count: 10_u32.into(), // TODO: get member count efficiently
|
||||||
|
@ -201,9 +206,7 @@ impl Service {
|
||||||
.users
|
.users
|
||||||
.displayname(user)?
|
.displayname(user)?
|
||||||
.unwrap_or_else(|| user.localpart().to_owned()),
|
.unwrap_or_else(|| user.localpart().to_owned()),
|
||||||
users_power_levels: power_levels.users.clone(),
|
power_levels: Some(power_levels),
|
||||||
default_power_level: power_levels.users_default,
|
|
||||||
notification_power_levels: power_levels.notifications.clone(),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(ruleset.get_actions(pdu, &ctx))
|
Ok(ruleset.get_actions(pdu, &ctx))
|
||||||
|
|
|
@ -2,7 +2,7 @@ pub mod presence;
|
||||||
pub mod read_receipt;
|
pub mod read_receipt;
|
||||||
pub mod typing;
|
pub mod typing;
|
||||||
|
|
||||||
pub trait Data: presence::Data + read_receipt::Data + typing::Data + 'static {}
|
pub trait Data: presence::Data + read_receipt::Data + 'static {}
|
||||||
|
|
||||||
pub struct Service {
|
pub struct Service {
|
||||||
pub presence: presence::Service,
|
pub presence: presence::Service,
|
||||||
|
|
|
@ -17,29 +17,32 @@ impl Service {
|
||||||
/// make sure users outside these rooms can't see them.
|
/// make sure users outside these rooms can't see them.
|
||||||
pub fn update_presence(
|
pub fn update_presence(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
_user_id: &UserId,
|
||||||
room_id: &RoomId,
|
_room_id: &RoomId,
|
||||||
presence: PresenceEvent,
|
_presence: PresenceEvent,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
self.db.update_presence(user_id, room_id, presence)
|
// self.db.update_presence(user_id, room_id, presence)
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Resets the presence timeout, so the user will stay in their current presence state.
|
/// Resets the presence timeout, so the user will stay in their current presence state.
|
||||||
pub fn ping_presence(&self, user_id: &UserId) -> Result<()> {
|
pub fn ping_presence(&self, _user_id: &UserId) -> Result<()> {
|
||||||
self.db.ping_presence(user_id)
|
// self.db.ping_presence(user_id)
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_last_presence_event(
|
pub fn get_last_presence_event(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
_user_id: &UserId,
|
||||||
room_id: &RoomId,
|
_room_id: &RoomId,
|
||||||
) -> Result<Option<PresenceEvent>> {
|
) -> Result<Option<PresenceEvent>> {
|
||||||
let last_update = match self.db.last_presence_update(user_id)? {
|
// let last_update = match self.db.last_presence_update(user_id)? {
|
||||||
Some(last) => last,
|
// Some(last) => last,
|
||||||
None => return Ok(None),
|
// None => return Ok(None),
|
||||||
};
|
// };
|
||||||
|
|
||||||
self.db.get_presence_event(room_id, user_id, last_update)
|
// self.db.get_presence_event(room_id, user_id, last_update)
|
||||||
|
Ok(None)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* TODO
|
/* TODO
|
||||||
|
@ -111,12 +114,12 @@ impl Service {
|
||||||
}*/
|
}*/
|
||||||
|
|
||||||
/// Returns the most recent presence updates that happened after the event with id `since`.
|
/// Returns the most recent presence updates that happened after the event with id `since`.
|
||||||
#[tracing::instrument(skip(self, since, room_id))]
|
|
||||||
pub fn presence_since(
|
pub fn presence_since(
|
||||||
&self,
|
&self,
|
||||||
room_id: &RoomId,
|
_room_id: &RoomId,
|
||||||
since: u64,
|
_since: u64,
|
||||||
) -> Result<HashMap<OwnedUserId, PresenceEvent>> {
|
) -> Result<HashMap<OwnedUserId, PresenceEvent>> {
|
||||||
self.db.presence_since(room_id, since)
|
// self.db.presence_since(room_id, since)
|
||||||
|
Ok(HashMap::new())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,6 +11,7 @@ pub trait Data: Send + Sync {
|
||||||
) -> Result<()>;
|
) -> Result<()>;
|
||||||
|
|
||||||
/// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
|
/// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
fn readreceipts_since<'a>(
|
fn readreceipts_since<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
|
|
|
@ -1,21 +0,0 @@
|
||||||
use crate::Result;
|
|
||||||
use ruma::{OwnedUserId, RoomId, UserId};
|
|
||||||
use std::collections::HashSet;
|
|
||||||
|
|
||||||
pub trait Data: Send + Sync {
|
|
||||||
/// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
|
|
||||||
/// called.
|
|
||||||
fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>;
|
|
||||||
|
|
||||||
/// Removes a user from typing before the timeout is reached.
|
|
||||||
fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>;
|
|
||||||
|
|
||||||
/// Makes sure that typing events with old timestamps get removed.
|
|
||||||
fn typings_maintain(&self, room_id: &RoomId) -> Result<()>;
|
|
||||||
|
|
||||||
/// Returns the count of the last typing update in this room.
|
|
||||||
fn last_typing_update(&self, room_id: &RoomId) -> Result<u64>;
|
|
||||||
|
|
||||||
/// Returns all user ids currently typing.
|
|
||||||
fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>>;
|
|
||||||
}
|
|
|
@ -1,48 +1,117 @@
|
||||||
mod data;
|
use ruma::{events::SyncEphemeralRoomEvent, OwnedRoomId, OwnedUserId, RoomId, UserId};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use tokio::sync::{broadcast, RwLock};
|
||||||
|
|
||||||
pub use data::Data;
|
use crate::{services, utils, Result};
|
||||||
use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId};
|
|
||||||
|
|
||||||
use crate::Result;
|
|
||||||
|
|
||||||
pub struct Service {
|
pub struct Service {
|
||||||
pub db: &'static dyn Data,
|
pub typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>, // u64 is unix timestamp of timeout
|
||||||
|
pub last_typing_update: RwLock<BTreeMap<OwnedRoomId, u64>>, // timestamp of the last change to typing users
|
||||||
|
pub typing_update_sender: broadcast::Sender<OwnedRoomId>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Service {
|
impl Service {
|
||||||
/// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
|
/// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
|
||||||
/// called.
|
/// called.
|
||||||
pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
|
pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
|
||||||
self.db.typing_add(user_id, room_id, timeout)
|
self.typing
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.entry(room_id.to_owned())
|
||||||
|
.or_default()
|
||||||
|
.insert(user_id.to_owned(), timeout);
|
||||||
|
self.last_typing_update
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.insert(room_id.to_owned(), services().globals.next_count()?);
|
||||||
|
let _ = self.typing_update_sender.send(room_id.to_owned());
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Removes a user from typing before the timeout is reached.
|
/// Removes a user from typing before the timeout is reached.
|
||||||
pub fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
pub async fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
|
||||||
self.db.typing_remove(user_id, room_id)
|
self.typing
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.entry(room_id.to_owned())
|
||||||
|
.or_default()
|
||||||
|
.remove(user_id);
|
||||||
|
self.last_typing_update
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.insert(room_id.to_owned(), services().globals.next_count()?);
|
||||||
|
let _ = self.typing_update_sender.send(room_id.to_owned());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn wait_for_update(&self, room_id: &RoomId) -> Result<()> {
|
||||||
|
let mut receiver = self.typing_update_sender.subscribe();
|
||||||
|
while let Ok(next) = receiver.recv().await {
|
||||||
|
if next == room_id {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Makes sure that typing events with old timestamps get removed.
|
/// Makes sure that typing events with old timestamps get removed.
|
||||||
fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
|
async fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
|
||||||
self.db.typings_maintain(room_id)
|
let current_timestamp = utils::millis_since_unix_epoch();
|
||||||
|
let mut removable = Vec::new();
|
||||||
|
{
|
||||||
|
let typing = self.typing.read().await;
|
||||||
|
let Some(room) = typing.get(room_id) else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
for (user, timeout) in room {
|
||||||
|
if *timeout < current_timestamp {
|
||||||
|
removable.push(user.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
drop(typing);
|
||||||
|
}
|
||||||
|
if !removable.is_empty() {
|
||||||
|
let typing = &mut self.typing.write().await;
|
||||||
|
let room = typing.entry(room_id.to_owned()).or_default();
|
||||||
|
for user in removable {
|
||||||
|
room.remove(&user);
|
||||||
|
}
|
||||||
|
self.last_typing_update
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.insert(room_id.to_owned(), services().globals.next_count()?);
|
||||||
|
let _ = self.typing_update_sender.send(room_id.to_owned());
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the count of the last typing update in this room.
|
/// Returns the count of the last typing update in this room.
|
||||||
pub fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
|
pub async fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
|
||||||
self.typings_maintain(room_id)?;
|
self.typings_maintain(room_id).await?;
|
||||||
|
Ok(self
|
||||||
self.db.last_typing_update(room_id)
|
.last_typing_update
|
||||||
|
.read()
|
||||||
|
.await
|
||||||
|
.get(room_id)
|
||||||
|
.copied()
|
||||||
|
.unwrap_or(0))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a new typing EDU.
|
/// Returns a new typing EDU.
|
||||||
pub fn typings_all(
|
pub async fn typings_all(
|
||||||
&self,
|
&self,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
) -> Result<SyncEphemeralRoomEvent<ruma::events::typing::TypingEventContent>> {
|
) -> Result<SyncEphemeralRoomEvent<ruma::events::typing::TypingEventContent>> {
|
||||||
let user_ids = self.db.typings_all(room_id)?;
|
|
||||||
|
|
||||||
Ok(SyncEphemeralRoomEvent {
|
Ok(SyncEphemeralRoomEvent {
|
||||||
content: ruma::events::typing::TypingEventContent {
|
content: ruma::events::typing::TypingEventContent {
|
||||||
user_ids: user_ids.into_iter().collect(),
|
user_ids: self
|
||||||
|
.typing
|
||||||
|
.read()
|
||||||
|
.await
|
||||||
|
.get(room_id)
|
||||||
|
.map(|m| m.keys().cloned().collect())
|
||||||
|
.unwrap_or_default(),
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,25 +1,23 @@
|
||||||
/// An async function that can recursively call itself.
|
/// An async function that can recursively call itself.
|
||||||
type AsyncRecursiveType<'a, T> = Pin<Box<dyn Future<Output = T> + 'a + Send>>;
|
type AsyncRecursiveType<'a, T> = Pin<Box<dyn Future<Output = T> + 'a + Send>>;
|
||||||
|
|
||||||
use ruma::{
|
|
||||||
api::federation::discovery::{get_remote_server_keys, get_server_keys},
|
|
||||||
CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, OwnedServerSigningKeyId,
|
|
||||||
RoomVersionId,
|
|
||||||
};
|
|
||||||
use std::{
|
use std::{
|
||||||
collections::{hash_map, BTreeMap, HashMap, HashSet},
|
collections::{hash_map, BTreeMap, HashMap, HashSet},
|
||||||
pin::Pin,
|
pin::Pin,
|
||||||
sync::{Arc, RwLock, RwLockWriteGuard},
|
sync::Arc,
|
||||||
time::{Duration, Instant, SystemTime},
|
time::{Duration, Instant, SystemTime},
|
||||||
};
|
};
|
||||||
use tokio::sync::Semaphore;
|
|
||||||
|
|
||||||
use futures_util::{stream::FuturesUnordered, Future, StreamExt};
|
use futures_util::{stream::FuturesUnordered, Future, StreamExt};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
client::error::ErrorKind,
|
client::error::ErrorKind,
|
||||||
federation::{
|
federation::{
|
||||||
discovery::get_remote_server_keys_batch::{self, v2::QueryCriteria},
|
discovery::{
|
||||||
|
get_remote_server_keys,
|
||||||
|
get_remote_server_keys_batch::{self, v2::QueryCriteria},
|
||||||
|
get_server_keys,
|
||||||
|
},
|
||||||
event::{get_event, get_room_state_ids},
|
event::{get_event, get_room_state_ids},
|
||||||
membership::create_join_event,
|
membership::create_join_event,
|
||||||
},
|
},
|
||||||
|
@ -31,9 +29,11 @@ use ruma::{
|
||||||
int,
|
int,
|
||||||
serde::Base64,
|
serde::Base64,
|
||||||
state_res::{self, RoomVersion, StateMap},
|
state_res::{self, RoomVersion, StateMap},
|
||||||
uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName,
|
uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
|
||||||
|
OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName,
|
||||||
};
|
};
|
||||||
use serde_json::value::RawValue as RawJsonValue;
|
use serde_json::value::RawValue as RawJsonValue;
|
||||||
|
use tokio::sync::{RwLock, RwLockWriteGuard, Semaphore};
|
||||||
use tracing::{debug, error, info, trace, warn};
|
use tracing::{debug, error, info, trace, warn};
|
||||||
|
|
||||||
use crate::{service::*, services, Error, PduEvent, Result};
|
use crate::{service::*, services, Error, PduEvent, Result};
|
||||||
|
@ -92,7 +92,7 @@ impl Service {
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.event_handler.acl_check(origin, &room_id)?;
|
services().rooms.event_handler.acl_check(origin, room_id)?;
|
||||||
|
|
||||||
// 1. Skip the PDU if we already have it as a timeline event
|
// 1. Skip the PDU if we already have it as a timeline event
|
||||||
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
|
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
|
||||||
|
@ -168,7 +168,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.bad_event_ratelimiter
|
.bad_event_ratelimiter
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.await
|
||||||
.get(&*prev_id)
|
.get(&*prev_id)
|
||||||
{
|
{
|
||||||
// Exponential backoff
|
// Exponential backoff
|
||||||
|
@ -184,7 +184,22 @@ impl Service {
|
||||||
}
|
}
|
||||||
|
|
||||||
if errors >= 5 {
|
if errors >= 5 {
|
||||||
break;
|
// Timeout other events
|
||||||
|
match services()
|
||||||
|
.globals
|
||||||
|
.bad_event_ratelimiter
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.entry((*prev_id).to_owned())
|
||||||
|
{
|
||||||
|
hash_map::Entry::Vacant(e) => {
|
||||||
|
e.insert((Instant::now(), 1));
|
||||||
|
}
|
||||||
|
hash_map::Entry::Occupied(mut e) => {
|
||||||
|
*e.get_mut() = (Instant::now(), e.get().1 + 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some((pdu, json)) = eventid_info.remove(&*prev_id) {
|
if let Some((pdu, json)) = eventid_info.remove(&*prev_id) {
|
||||||
|
@ -198,7 +213,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.roomid_federationhandletime
|
.roomid_federationhandletime
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));
|
.insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));
|
||||||
|
|
||||||
if let Err(e) = self
|
if let Err(e) = self
|
||||||
|
@ -218,7 +233,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.bad_event_ratelimiter
|
.bad_event_ratelimiter
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry((*prev_id).to_owned())
|
.entry((*prev_id).to_owned())
|
||||||
{
|
{
|
||||||
hash_map::Entry::Vacant(e) => {
|
hash_map::Entry::Vacant(e) => {
|
||||||
|
@ -234,7 +249,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.roomid_federationhandletime
|
.roomid_federationhandletime
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.remove(&room_id.to_owned());
|
.remove(&room_id.to_owned());
|
||||||
debug!(
|
debug!(
|
||||||
"Handling prev event {} took {}m{}s",
|
"Handling prev event {} took {}m{}s",
|
||||||
|
@ -252,7 +267,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.roomid_federationhandletime
|
.roomid_federationhandletime
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.insert(room_id.to_owned(), (event_id.to_owned(), start_time));
|
.insert(room_id.to_owned(), (event_id.to_owned(), start_time));
|
||||||
let r = services()
|
let r = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -270,12 +285,13 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.roomid_federationhandletime
|
.roomid_federationhandletime
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.remove(&room_id.to_owned());
|
.remove(&room_id.to_owned());
|
||||||
|
|
||||||
r
|
r
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::type_complexity, clippy::too_many_arguments)]
|
||||||
#[tracing::instrument(skip(self, create_event, value, pub_key_map))]
|
#[tracing::instrument(skip(self, create_event, value, pub_key_map))]
|
||||||
fn handle_outlier_pdu<'a>(
|
fn handle_outlier_pdu<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
|
@ -310,11 +326,8 @@ impl Service {
|
||||||
let room_version =
|
let room_version =
|
||||||
RoomVersion::new(room_version_id).expect("room version is supported");
|
RoomVersion::new(room_version_id).expect("room version is supported");
|
||||||
|
|
||||||
let mut val = match ruma::signatures::verify_event(
|
let guard = pub_key_map.read().await;
|
||||||
&pub_key_map.read().expect("RwLock is poisoned."),
|
let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) {
|
||||||
&value,
|
|
||||||
room_version_id,
|
|
||||||
) {
|
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
// Drop
|
// Drop
|
||||||
warn!("Dropping bad event {}: {}", event_id, e,);
|
warn!("Dropping bad event {}: {}", event_id, e,);
|
||||||
|
@ -349,6 +362,8 @@ impl Service {
|
||||||
Ok(ruma::signatures::Verified::All) => value,
|
Ok(ruma::signatures::Verified::All) => value,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
drop(guard);
|
||||||
|
|
||||||
// Now that we have checked the signature and hashes we can add the eventID and convert
|
// Now that we have checked the signature and hashes we can add the eventID and convert
|
||||||
// to our PduEvent type
|
// to our PduEvent type
|
||||||
val.insert(
|
val.insert(
|
||||||
|
@ -676,13 +691,15 @@ impl Service {
|
||||||
{
|
{
|
||||||
Ok(res) => {
|
Ok(res) => {
|
||||||
debug!("Fetching state events at event.");
|
debug!("Fetching state events at event.");
|
||||||
|
let collect = res
|
||||||
|
.pdu_ids
|
||||||
|
.iter()
|
||||||
|
.map(|x| Arc::from(&**x))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
let state_vec = self
|
let state_vec = self
|
||||||
.fetch_and_handle_outliers(
|
.fetch_and_handle_outliers(
|
||||||
origin,
|
origin,
|
||||||
&res.pdu_ids
|
&collect,
|
||||||
.iter()
|
|
||||||
.map(|x| Arc::from(&**x))
|
|
||||||
.collect::<Vec<_>>(),
|
|
||||||
create_event,
|
create_event,
|
||||||
room_id,
|
room_id,
|
||||||
room_version_id,
|
room_version_id,
|
||||||
|
@ -789,7 +806,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.await
|
||||||
.entry(room_id.to_owned())
|
.entry(room_id.to_owned())
|
||||||
.or_default(),
|
.or_default(),
|
||||||
);
|
);
|
||||||
|
@ -868,14 +885,18 @@ impl Service {
|
||||||
debug!("Starting soft fail auth check");
|
debug!("Starting soft fail auth check");
|
||||||
|
|
||||||
if soft_fail {
|
if soft_fail {
|
||||||
services().rooms.timeline.append_incoming_pdu(
|
services()
|
||||||
&incoming_pdu,
|
.rooms
|
||||||
val,
|
.timeline
|
||||||
extremities.iter().map(|e| (**e).to_owned()).collect(),
|
.append_incoming_pdu(
|
||||||
state_ids_compressed,
|
&incoming_pdu,
|
||||||
soft_fail,
|
val,
|
||||||
&state_lock,
|
extremities.iter().map(|e| (**e).to_owned()).collect(),
|
||||||
)?;
|
state_ids_compressed,
|
||||||
|
soft_fail,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// Soft fail, we keep the event as an outlier but don't add it to the timeline
|
// Soft fail, we keep the event as an outlier but don't add it to the timeline
|
||||||
warn!("Event was soft failed: {:?}", incoming_pdu);
|
warn!("Event was soft failed: {:?}", incoming_pdu);
|
||||||
|
@ -896,14 +917,18 @@ impl Service {
|
||||||
// We use the `state_at_event` instead of `state_after` so we accurately
|
// We use the `state_at_event` instead of `state_after` so we accurately
|
||||||
// represent the state for this event.
|
// represent the state for this event.
|
||||||
|
|
||||||
let pdu_id = services().rooms.timeline.append_incoming_pdu(
|
let pdu_id = services()
|
||||||
&incoming_pdu,
|
.rooms
|
||||||
val,
|
.timeline
|
||||||
extremities.iter().map(|e| (**e).to_owned()).collect(),
|
.append_incoming_pdu(
|
||||||
state_ids_compressed,
|
&incoming_pdu,
|
||||||
soft_fail,
|
val,
|
||||||
&state_lock,
|
extremities.iter().map(|e| (**e).to_owned()).collect(),
|
||||||
)?;
|
state_ids_compressed,
|
||||||
|
soft_fail,
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
debug!("Appended incoming pdu");
|
debug!("Appended incoming pdu");
|
||||||
|
|
||||||
|
@ -965,14 +990,21 @@ impl Service {
|
||||||
|
|
||||||
debug!("Resolving state");
|
debug!("Resolving state");
|
||||||
|
|
||||||
let lock = services().globals.stateres_mutex.lock();
|
let fetch_event = |id: &_| {
|
||||||
let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
|
|
||||||
let res = services().rooms.timeline.get_pdu(id);
|
let res = services().rooms.timeline.get_pdu(id);
|
||||||
if let Err(e) = &res {
|
if let Err(e) = &res {
|
||||||
error!("LOOK AT ME Failed to fetch event: {}", e);
|
error!("LOOK AT ME Failed to fetch event: {}", e);
|
||||||
}
|
}
|
||||||
res.ok().flatten()
|
res.ok().flatten()
|
||||||
}) {
|
};
|
||||||
|
|
||||||
|
let lock = services().globals.stateres_mutex.lock();
|
||||||
|
let state = match state_res::resolve(
|
||||||
|
room_version_id,
|
||||||
|
&fork_states,
|
||||||
|
auth_chain_sets,
|
||||||
|
fetch_event,
|
||||||
|
) {
|
||||||
Ok(new_state) => new_state,
|
Ok(new_state) => new_state,
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));
|
return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));
|
||||||
|
@ -1009,6 +1041,7 @@ impl Service {
|
||||||
/// b. Look at outlier pdu tree
|
/// b. Look at outlier pdu tree
|
||||||
/// c. Ask origin server over federation
|
/// c. Ask origin server over federation
|
||||||
/// d. TODO: Ask other servers over federation?
|
/// d. TODO: Ask other servers over federation?
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
#[tracing::instrument(skip_all)]
|
#[tracing::instrument(skip_all)]
|
||||||
pub(crate) fn fetch_and_handle_outliers<'a>(
|
pub(crate) fn fetch_and_handle_outliers<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
|
@ -1021,17 +1054,21 @@ impl Service {
|
||||||
) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
|
) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
|
||||||
{
|
{
|
||||||
Box::pin(async move {
|
Box::pin(async move {
|
||||||
let back_off = |id| match services()
|
let back_off = |id| async move {
|
||||||
.globals
|
match services()
|
||||||
.bad_event_ratelimiter
|
.globals
|
||||||
.write()
|
.bad_event_ratelimiter
|
||||||
.unwrap()
|
.write()
|
||||||
.entry(id)
|
.await
|
||||||
{
|
.entry(id)
|
||||||
hash_map::Entry::Vacant(e) => {
|
{
|
||||||
e.insert((Instant::now(), 1));
|
hash_map::Entry::Vacant(e) => {
|
||||||
|
e.insert((Instant::now(), 1));
|
||||||
|
}
|
||||||
|
hash_map::Entry::Occupied(mut e) => {
|
||||||
|
*e.get_mut() = (Instant::now(), e.get().1 + 1)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut pdus = vec![];
|
let mut pdus = vec![];
|
||||||
|
@ -1057,7 +1094,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.bad_event_ratelimiter
|
.bad_event_ratelimiter
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.await
|
||||||
.get(&*next_id)
|
.get(&*next_id)
|
||||||
{
|
{
|
||||||
// Exponential backoff
|
// Exponential backoff
|
||||||
|
@ -1104,7 +1141,7 @@ impl Service {
|
||||||
match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) {
|
match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) {
|
||||||
Ok(t) => t,
|
Ok(t) => t,
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
back_off((*next_id).to_owned());
|
back_off((*next_id).to_owned()).await;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -1136,7 +1173,7 @@ impl Service {
|
||||||
}
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
warn!("Failed to fetch event: {}", next_id);
|
warn!("Failed to fetch event: {}", next_id);
|
||||||
back_off((*next_id).to_owned());
|
back_off((*next_id).to_owned()).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1146,7 +1183,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.bad_event_ratelimiter
|
.bad_event_ratelimiter
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.await
|
||||||
.get(&**next_id)
|
.get(&**next_id)
|
||||||
{
|
{
|
||||||
// Exponential backoff
|
// Exponential backoff
|
||||||
|
@ -1181,7 +1218,7 @@ impl Service {
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
warn!("Authentication of event {} failed: {:?}", next_id, e);
|
warn!("Authentication of event {} failed: {:?}", next_id, e);
|
||||||
back_off((**next_id).to_owned());
|
back_off((**next_id).to_owned()).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1336,7 +1373,7 @@ impl Service {
|
||||||
|
|
||||||
pub_key_map
|
pub_key_map
|
||||||
.write()
|
.write()
|
||||||
.map_err(|_| Error::bad_database("RwLock is poisoned."))?
|
.await
|
||||||
.insert(signature_server.clone(), keys);
|
.insert(signature_server.clone(), keys);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1345,7 +1382,7 @@ impl Service {
|
||||||
|
|
||||||
// Gets a list of servers for which we don't have the signing key yet. We go over
|
// Gets a list of servers for which we don't have the signing key yet. We go over
|
||||||
// the PDUs and either cache the key or add it to the list that needs to be retrieved.
|
// the PDUs and either cache the key or add it to the list that needs to be retrieved.
|
||||||
fn get_server_keys_from_cache(
|
async fn get_server_keys_from_cache(
|
||||||
&self,
|
&self,
|
||||||
pdu: &RawJsonValue,
|
pdu: &RawJsonValue,
|
||||||
servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
|
servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
|
||||||
|
@ -1369,7 +1406,7 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.bad_event_ratelimiter
|
.bad_event_ratelimiter
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.await
|
||||||
.get(event_id)
|
.get(event_id)
|
||||||
{
|
{
|
||||||
// Exponential backoff
|
// Exponential backoff
|
||||||
|
@ -1445,17 +1482,19 @@ impl Service {
|
||||||
> = BTreeMap::new();
|
> = BTreeMap::new();
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut pkm = pub_key_map
|
let mut pkm = pub_key_map.write().await;
|
||||||
.write()
|
|
||||||
.map_err(|_| Error::bad_database("RwLock is poisoned."))?;
|
|
||||||
|
|
||||||
// Try to fetch keys, failure is okay
|
// Try to fetch keys, failure is okay
|
||||||
// Servers we couldn't find in the cache will be added to `servers`
|
// Servers we couldn't find in the cache will be added to `servers`
|
||||||
for pdu in &event.room_state.state {
|
for pdu in &event.room_state.state {
|
||||||
let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
|
let _ = self
|
||||||
|
.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
|
||||||
|
.await;
|
||||||
}
|
}
|
||||||
for pdu in &event.room_state.auth_chain {
|
for pdu in &event.room_state.auth_chain {
|
||||||
let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
|
let _ = self
|
||||||
|
.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
|
||||||
|
.await;
|
||||||
}
|
}
|
||||||
|
|
||||||
drop(pkm);
|
drop(pkm);
|
||||||
|
@ -1479,9 +1518,7 @@ impl Service {
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
trace!("Got signing keys: {:?}", keys);
|
trace!("Got signing keys: {:?}", keys);
|
||||||
let mut pkm = pub_key_map
|
let mut pkm = pub_key_map.write().await;
|
||||||
.write()
|
|
||||||
.map_err(|_| Error::bad_database("RwLock is poisoned."))?;
|
|
||||||
for k in keys.server_keys {
|
for k in keys.server_keys {
|
||||||
let k = match k.deserialize() {
|
let k = match k.deserialize() {
|
||||||
Ok(key) => key,
|
Ok(key) => key,
|
||||||
|
@ -1540,10 +1577,7 @@ impl Service {
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(k, v)| (k.to_string(), v.key))
|
.map(|(k, v)| (k.to_string(), v.key))
|
||||||
.collect();
|
.collect();
|
||||||
pub_key_map
|
pub_key_map.write().await.insert(origin.to_string(), result);
|
||||||
.write()
|
|
||||||
.map_err(|_| Error::bad_database("RwLock is poisoned."))?
|
|
||||||
.insert(origin.to_string(), result);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
info!("Done handling result");
|
info!("Done handling result");
|
||||||
|
@ -1608,14 +1642,14 @@ impl Service {
|
||||||
.globals
|
.globals
|
||||||
.servername_ratelimiter
|
.servername_ratelimiter
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.await
|
||||||
.get(origin)
|
.get(origin)
|
||||||
.map(|s| Arc::clone(s).acquire_owned());
|
.map(|s| Arc::clone(s).acquire_owned());
|
||||||
|
|
||||||
let permit = match permit {
|
let permit = match permit {
|
||||||
Some(p) => p,
|
Some(p) => p,
|
||||||
None => {
|
None => {
|
||||||
let mut write = services().globals.servername_ratelimiter.write().unwrap();
|
let mut write = services().globals.servername_ratelimiter.write().await;
|
||||||
let s = Arc::clone(
|
let s = Arc::clone(
|
||||||
write
|
write
|
||||||
.entry(origin.to_owned())
|
.entry(origin.to_owned())
|
||||||
|
@ -1627,24 +1661,26 @@ impl Service {
|
||||||
}
|
}
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let back_off = |id| match services()
|
let back_off = |id| async {
|
||||||
.globals
|
match services()
|
||||||
.bad_signature_ratelimiter
|
.globals
|
||||||
.write()
|
.bad_signature_ratelimiter
|
||||||
.unwrap()
|
.write()
|
||||||
.entry(id)
|
.await
|
||||||
{
|
.entry(id)
|
||||||
hash_map::Entry::Vacant(e) => {
|
{
|
||||||
e.insert((Instant::now(), 1));
|
hash_map::Entry::Vacant(e) => {
|
||||||
|
e.insert((Instant::now(), 1));
|
||||||
|
}
|
||||||
|
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
||||||
}
|
}
|
||||||
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some((time, tries)) = services()
|
if let Some((time, tries)) = services()
|
||||||
.globals
|
.globals
|
||||||
.bad_signature_ratelimiter
|
.bad_signature_ratelimiter
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.await
|
||||||
.get(&signature_ids)
|
.get(&signature_ids)
|
||||||
{
|
{
|
||||||
// Exponential backoff
|
// Exponential backoff
|
||||||
|
@ -1751,7 +1787,7 @@ impl Service {
|
||||||
|
|
||||||
drop(permit);
|
drop(permit);
|
||||||
|
|
||||||
back_off(signature_ids);
|
back_off(signature_ids).await;
|
||||||
|
|
||||||
warn!("Failed to find public key for server: {}", origin);
|
warn!("Failed to find public key for server: {}", origin);
|
||||||
Err(Error::BadServerResponse(
|
Err(Error::BadServerResponse(
|
||||||
|
|
|
@ -1,11 +1,9 @@
|
||||||
mod data;
|
mod data;
|
||||||
use std::{
|
use std::collections::{HashMap, HashSet};
|
||||||
collections::{HashMap, HashSet},
|
|
||||||
sync::Mutex,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub use data::Data;
|
pub use data::Data;
|
||||||
use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId};
|
use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId};
|
||||||
|
use tokio::sync::Mutex;
|
||||||
|
|
||||||
use crate::Result;
|
use crate::Result;
|
||||||
|
|
||||||
|
@ -14,6 +12,7 @@ use super::timeline::PduCount;
|
||||||
pub struct Service {
|
pub struct Service {
|
||||||
pub db: &'static dyn Data,
|
pub db: &'static dyn Data,
|
||||||
|
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
pub lazy_load_waiting:
|
pub lazy_load_waiting:
|
||||||
Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
|
Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
|
||||||
}
|
}
|
||||||
|
@ -32,7 +31,7 @@ impl Service {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip(self))]
|
#[tracing::instrument(skip(self))]
|
||||||
pub fn lazy_load_mark_sent(
|
pub async fn lazy_load_mark_sent(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
device_id: &DeviceId,
|
device_id: &DeviceId,
|
||||||
|
@ -40,7 +39,7 @@ impl Service {
|
||||||
lazy_load: HashSet<OwnedUserId>,
|
lazy_load: HashSet<OwnedUserId>,
|
||||||
count: PduCount,
|
count: PduCount,
|
||||||
) {
|
) {
|
||||||
self.lazy_load_waiting.lock().unwrap().insert(
|
self.lazy_load_waiting.lock().await.insert(
|
||||||
(
|
(
|
||||||
user_id.to_owned(),
|
user_id.to_owned(),
|
||||||
device_id.to_owned(),
|
device_id.to_owned(),
|
||||||
|
@ -52,14 +51,14 @@ impl Service {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip(self))]
|
#[tracing::instrument(skip(self))]
|
||||||
pub fn lazy_load_confirm_delivery(
|
pub async fn lazy_load_confirm_delivery(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
device_id: &DeviceId,
|
device_id: &DeviceId,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
since: PduCount,
|
since: PduCount,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&(
|
if let Some(user_ids) = self.lazy_load_waiting.lock().await.remove(&(
|
||||||
user_id.to_owned(),
|
user_id.to_owned(),
|
||||||
device_id.to_owned(),
|
device_id.to_owned(),
|
||||||
room_id.to_owned(),
|
room_id.to_owned(),
|
||||||
|
|
|
@ -5,6 +5,7 @@ use ruma::{EventId, RoomId, UserId};
|
||||||
|
|
||||||
pub trait Data: Send + Sync {
|
pub trait Data: Send + Sync {
|
||||||
fn add_relation(&self, from: u64, to: u64) -> Result<()>;
|
fn add_relation(&self, from: u64, to: u64) -> Result<()>;
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
fn relations_until<'a>(
|
fn relations_until<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
user_id: &'a UserId,
|
user_id: &'a UserId,
|
||||||
|
|
|
@ -40,6 +40,7 @@ impl Service {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn paginate_relations_with_filter(
|
pub fn paginate_relations_with_filter(
|
||||||
&self,
|
&self,
|
||||||
sender_user: &UserId,
|
sender_user: &UserId,
|
||||||
|
@ -82,7 +83,7 @@ impl Service {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
|
.user_can_see_event(sender_user, room_id, &pdu.event_id)
|
||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
})
|
})
|
||||||
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
||||||
|
@ -106,7 +107,7 @@ impl Service {
|
||||||
let events_before: Vec<_> = services()
|
let events_before: Vec<_> = services()
|
||||||
.rooms
|
.rooms
|
||||||
.pdu_metadata
|
.pdu_metadata
|
||||||
.relations_until(sender_user, &room_id, target, from)?
|
.relations_until(sender_user, room_id, target, from)?
|
||||||
.filter(|r| {
|
.filter(|r| {
|
||||||
r.as_ref().map_or(true, |(_, pdu)| {
|
r.as_ref().map_or(true, |(_, pdu)| {
|
||||||
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
|
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
|
||||||
|
@ -129,7 +130,7 @@ impl Service {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
|
.user_can_see_event(sender_user, room_id, &pdu.event_id)
|
||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
})
|
})
|
||||||
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
||||||
|
|
|
@ -4,6 +4,7 @@ use ruma::RoomId;
|
||||||
pub trait Data: Send + Sync {
|
pub trait Data: Send + Sync {
|
||||||
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
|
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
|
||||||
|
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
fn search_pdus<'a>(
|
fn search_pdus<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::Arc;
|
||||||
|
|
||||||
use lru_cache::LruCache;
|
use lru_cache::LruCache;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
|
@ -25,6 +25,7 @@ use ruma::{
|
||||||
space::SpaceRoomJoinRule,
|
space::SpaceRoomJoinRule,
|
||||||
OwnedRoomId, RoomId, UserId,
|
OwnedRoomId, RoomId, UserId,
|
||||||
};
|
};
|
||||||
|
use tokio::sync::Mutex;
|
||||||
|
|
||||||
use tracing::{debug, error, warn};
|
use tracing::{debug, error, warn};
|
||||||
|
|
||||||
|
@ -79,7 +80,7 @@ impl Service {
|
||||||
if let Some(cached) = self
|
if let Some(cached) = self
|
||||||
.roomid_spacechunk_cache
|
.roomid_spacechunk_cache
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap()
|
.await
|
||||||
.get_mut(¤t_room.to_owned())
|
.get_mut(¤t_room.to_owned())
|
||||||
.as_ref()
|
.as_ref()
|
||||||
{
|
{
|
||||||
|
@ -134,7 +135,7 @@ impl Service {
|
||||||
|
|
||||||
if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
|
if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
|
||||||
.ok()
|
.ok()
|
||||||
.and_then(|c| c.via)
|
.map(|c| c.via)
|
||||||
.map_or(true, |v| v.is_empty())
|
.map_or(true, |v| v.is_empty())
|
||||||
{
|
{
|
||||||
continue;
|
continue;
|
||||||
|
@ -171,7 +172,7 @@ impl Service {
|
||||||
.transpose()?
|
.transpose()?
|
||||||
.unwrap_or(JoinRule::Invite);
|
.unwrap_or(JoinRule::Invite);
|
||||||
|
|
||||||
self.roomid_spacechunk_cache.lock().unwrap().insert(
|
self.roomid_spacechunk_cache.lock().await.insert(
|
||||||
current_room.clone(),
|
current_room.clone(),
|
||||||
Some(CachedSpaceChunk {
|
Some(CachedSpaceChunk {
|
||||||
chunk,
|
chunk,
|
||||||
|
@ -185,7 +186,9 @@ impl Service {
|
||||||
stack.push(children_ids);
|
stack.push(children_ids);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
let server = current_room.server_name();
|
let server = current_room
|
||||||
|
.server_name()
|
||||||
|
.expect("Room IDs should always have a server name");
|
||||||
if server == services().globals.server_name() {
|
if server == services().globals.server_name() {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -193,11 +196,11 @@ impl Service {
|
||||||
// Early return so the client can see some data already
|
// Early return so the client can see some data already
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
warn!("Asking {server} for /hierarchy");
|
debug!("Asking {server} for /hierarchy");
|
||||||
if let Ok(response) = services()
|
if let Ok(response) = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&server,
|
server,
|
||||||
federation::space::get_hierarchy::v1::Request {
|
federation::space::get_hierarchy::v1::Request {
|
||||||
room_id: current_room.to_owned(),
|
room_id: current_room.to_owned(),
|
||||||
suggested_only,
|
suggested_only,
|
||||||
|
@ -235,7 +238,7 @@ impl Service {
|
||||||
.room
|
.room
|
||||||
.allowed_room_ids
|
.allowed_room_ids
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|room| AllowRule::room_membership(room))
|
.map(AllowRule::room_membership)
|
||||||
.collect(),
|
.collect(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -245,7 +248,7 @@ impl Service {
|
||||||
.room
|
.room
|
||||||
.allowed_room_ids
|
.allowed_room_ids
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|room| AllowRule::room_membership(room))
|
.map(AllowRule::room_membership)
|
||||||
.collect(),
|
.collect(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -263,7 +266,7 @@ impl Service {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
self.roomid_spacechunk_cache.lock().unwrap().insert(
|
self.roomid_spacechunk_cache.lock().await.insert(
|
||||||
current_room.clone(),
|
current_room.clone(),
|
||||||
Some(CachedSpaceChunk {
|
Some(CachedSpaceChunk {
|
||||||
chunk,
|
chunk,
|
||||||
|
@ -287,7 +290,7 @@ impl Service {
|
||||||
} else {
|
} else {
|
||||||
self.roomid_spacechunk_cache
|
self.roomid_spacechunk_cache
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap()
|
.await
|
||||||
.insert(current_room.clone(), None);
|
.insert(current_room.clone(), None);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -313,7 +316,7 @@ impl Service {
|
||||||
canonical_alias: services()
|
canonical_alias: services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
|
.room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")?
|
||||||
.map_or(Ok(None), |s| {
|
.map_or(Ok(None), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomCanonicalAliasEventContent| c.alias)
|
.map(|c: RoomCanonicalAliasEventContent| c.alias)
|
||||||
|
@ -321,11 +324,11 @@ impl Service {
|
||||||
Error::bad_database("Invalid canonical alias event in database.")
|
Error::bad_database("Invalid canonical alias event in database.")
|
||||||
})
|
})
|
||||||
})?,
|
})?,
|
||||||
name: services().rooms.state_accessor.get_name(&room_id)?,
|
name: services().rooms.state_accessor.get_name(room_id)?,
|
||||||
num_joined_members: services()
|
num_joined_members: services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.room_joined_count(&room_id)?
|
.room_joined_count(room_id)?
|
||||||
.unwrap_or_else(|| {
|
.unwrap_or_else(|| {
|
||||||
warn!("Room {} has no member count", room_id);
|
warn!("Room {} has no member count", room_id);
|
||||||
0
|
0
|
||||||
|
@ -336,7 +339,7 @@ impl Service {
|
||||||
topic: services()
|
topic: services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
|
.room_state_get(room_id, &StateEventType::RoomTopic, "")?
|
||||||
.map_or(Ok(None), |s| {
|
.map_or(Ok(None), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomTopicEventContent| Some(c.topic))
|
.map(|c: RoomTopicEventContent| Some(c.topic))
|
||||||
|
@ -348,7 +351,7 @@ impl Service {
|
||||||
world_readable: services()
|
world_readable: services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
|
.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
|
||||||
.map_or(Ok(false), |s| {
|
.map_or(Ok(false), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomHistoryVisibilityEventContent| {
|
.map(|c: RoomHistoryVisibilityEventContent| {
|
||||||
|
@ -363,7 +366,7 @@ impl Service {
|
||||||
guest_can_join: services()
|
guest_can_join: services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
|
.room_state_get(room_id, &StateEventType::RoomGuestAccess, "")?
|
||||||
.map_or(Ok(false), |s| {
|
.map_or(Ok(false), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomGuestAccessEventContent| {
|
.map(|c: RoomGuestAccessEventContent| {
|
||||||
|
@ -376,7 +379,7 @@ impl Service {
|
||||||
avatar_url: services()
|
avatar_url: services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
|
.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
|
||||||
.map(|s| {
|
.map(|s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomAvatarEventContent| c.url)
|
.map(|c: RoomAvatarEventContent| c.url)
|
||||||
|
@ -389,7 +392,7 @@ impl Service {
|
||||||
let join_rule = services()
|
let join_rule = services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
|
.room_state_get(room_id, &StateEventType::RoomJoinRules, "")?
|
||||||
.map(|s| {
|
.map(|s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomJoinRulesEventContent| c.join_rule)
|
.map(|c: RoomJoinRulesEventContent| c.join_rule)
|
||||||
|
@ -415,7 +418,7 @@ impl Service {
|
||||||
room_type: services()
|
room_type: services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
|
.room_state_get(room_id, &StateEventType::RoomCreate, "")?
|
||||||
.map(|s| {
|
.map(|s| {
|
||||||
serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(|e| {
|
serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(|e| {
|
||||||
error!("Invalid room create event in database: {}", e);
|
error!("Invalid room create event in database: {}", e);
|
||||||
|
@ -455,7 +458,7 @@ impl Service {
|
||||||
SpaceRoomJoinRule::Invite => services()
|
SpaceRoomJoinRule::Invite => services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.is_joined(sender_user, &room_id)?,
|
.is_joined(sender_user, room_id)?,
|
||||||
_ => false,
|
_ => false,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -479,17 +482,14 @@ impl Service {
|
||||||
match join_rule {
|
match join_rule {
|
||||||
JoinRule::Restricted(r) => {
|
JoinRule::Restricted(r) => {
|
||||||
for rule in &r.allow {
|
for rule in &r.allow {
|
||||||
match rule {
|
if let join_rules::AllowRule::RoomMembership(rm) = rule {
|
||||||
join_rules::AllowRule::RoomMembership(rm) => {
|
if let Ok(true) = services()
|
||||||
if let Ok(true) = services()
|
.rooms
|
||||||
.rooms
|
.state_cache
|
||||||
.state_cache
|
.is_joined(sender_user, &rm.room_id)
|
||||||
.is_joined(sender_user, &rm.room_id)
|
{
|
||||||
{
|
return Ok(true);
|
||||||
return Ok(true);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
_ => {}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -41,7 +41,7 @@ impl Service {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_compressor
|
.state_compressor
|
||||||
.parse_compressed_state_event(&new)
|
.parse_compressed_state_event(new)
|
||||||
.ok()
|
.ok()
|
||||||
.map(|(_, id)| id)
|
.map(|(_, id)| id)
|
||||||
}) {
|
}) {
|
||||||
|
@ -95,7 +95,7 @@ impl Service {
|
||||||
.spaces
|
.spaces
|
||||||
.roomid_spacechunk_cache
|
.roomid_spacechunk_cache
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap()
|
.await
|
||||||
.remove(&pdu.room_id);
|
.remove(&pdu.room_id);
|
||||||
}
|
}
|
||||||
_ => continue,
|
_ => continue,
|
||||||
|
@ -412,7 +412,7 @@ impl Service {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_compressor
|
.state_compressor
|
||||||
.parse_compressed_state_event(&compressed)
|
.parse_compressed_state_event(compressed)
|
||||||
.ok()
|
.ok()
|
||||||
})
|
})
|
||||||
.filter_map(|(shortstatekey, event_id)| {
|
.filter_map(|(shortstatekey, event_id)| {
|
||||||
|
|
|
@ -16,11 +16,12 @@ use ruma::{
|
||||||
},
|
},
|
||||||
StateEventType,
|
StateEventType,
|
||||||
},
|
},
|
||||||
EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
|
EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
|
||||||
};
|
};
|
||||||
use tracing::error;
|
use serde_json::value::to_raw_value;
|
||||||
|
use tracing::{error, warn};
|
||||||
|
|
||||||
use crate::{services, Error, PduEvent, Result};
|
use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result};
|
||||||
|
|
||||||
pub struct Service {
|
pub struct Service {
|
||||||
pub db: &'static dyn Data,
|
pub db: &'static dyn Data,
|
||||||
|
@ -180,7 +181,7 @@ impl Service {
|
||||||
return Ok(*visibility);
|
return Ok(*visibility);
|
||||||
}
|
}
|
||||||
|
|
||||||
let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
|
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
|
||||||
|
|
||||||
let history_visibility = self
|
let history_visibility = self
|
||||||
.state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
|
.state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
|
||||||
|
@ -197,11 +198,11 @@ impl Service {
|
||||||
HistoryVisibility::Shared => currently_member,
|
HistoryVisibility::Shared => currently_member,
|
||||||
HistoryVisibility::Invited => {
|
HistoryVisibility::Invited => {
|
||||||
// Allow if any member on requesting server was AT LEAST invited, else deny
|
// Allow if any member on requesting server was AT LEAST invited, else deny
|
||||||
self.user_was_invited(shortstatehash, &user_id)
|
self.user_was_invited(shortstatehash, user_id)
|
||||||
}
|
}
|
||||||
HistoryVisibility::Joined => {
|
HistoryVisibility::Joined => {
|
||||||
// Allow if any member on requested server was joined, else deny
|
// Allow if any member on requested server was joined, else deny
|
||||||
self.user_was_joined(shortstatehash, &user_id)
|
self.user_was_joined(shortstatehash, user_id)
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
error!("Unknown history visibility {history_visibility}");
|
error!("Unknown history visibility {history_visibility}");
|
||||||
|
@ -221,10 +222,10 @@ impl Service {
|
||||||
/// the room's history_visibility at that event's state.
|
/// the room's history_visibility at that event's state.
|
||||||
#[tracing::instrument(skip(self, user_id, room_id))]
|
#[tracing::instrument(skip(self, user_id, room_id))]
|
||||||
pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
|
pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
|
||||||
let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
|
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
|
||||||
|
|
||||||
let history_visibility = self
|
let history_visibility = self
|
||||||
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
|
.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
|
||||||
.map_or(Ok(HistoryVisibility::Shared), |s| {
|
.map_or(Ok(HistoryVisibility::Shared), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomHistoryVisibilityEventContent| c.history_visibility)
|
.map(|c: RoomHistoryVisibilityEventContent| c.history_visibility)
|
||||||
|
@ -276,25 +277,66 @@ impl Service {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomName, "")?
|
.room_state_get(room_id, &StateEventType::RoomName, "")?
|
||||||
.map_or(Ok(None), |s| {
|
.map_or(Ok(None), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomNameEventContent| c.name)
|
.map(|c: RoomNameEventContent| Some(c.name))
|
||||||
.map_err(|_| Error::bad_database("Invalid room name event in database."))
|
.map_err(|e| {
|
||||||
|
error!(
|
||||||
|
"Invalid room name event in database for room {}. {}",
|
||||||
|
room_id, e
|
||||||
|
);
|
||||||
|
Error::bad_database("Invalid room name event in database.")
|
||||||
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_avatar(&self, room_id: &RoomId) -> Result<Option<RoomAvatarEventContent>> {
|
pub fn get_avatar(&self, room_id: &RoomId) -> Result<JsOption<RoomAvatarEventContent>> {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
|
.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
|
||||||
.map_or(Ok(None), |s| {
|
.map_or(Ok(JsOption::Undefined), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map_err(|_| Error::bad_database("Invalid room avatar event in database."))
|
.map_err(|_| Error::bad_database("Invalid room avatar event in database."))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn user_can_invite(
|
||||||
|
&self,
|
||||||
|
room_id: &RoomId,
|
||||||
|
sender: &UserId,
|
||||||
|
target_user: &UserId,
|
||||||
|
) -> Result<bool> {
|
||||||
|
let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite))
|
||||||
|
.expect("Event content always serializes");
|
||||||
|
|
||||||
|
let new_event = PduBuilder {
|
||||||
|
event_type: ruma::events::TimelineEventType::RoomMember,
|
||||||
|
content,
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(target_user.into()),
|
||||||
|
redacts: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mutex_state = Arc::clone(
|
||||||
|
services()
|
||||||
|
.globals
|
||||||
|
.roomid_mutex_state
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.entry(room_id.to_owned())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
|
Ok(services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.create_hash_and_sign_event(new_event, sender, room_id, &state_lock)
|
||||||
|
.is_ok())
|
||||||
|
}
|
||||||
|
|
||||||
pub fn get_member(
|
pub fn get_member(
|
||||||
&self,
|
&self,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
|
@ -303,7 +345,7 @@ impl Service {
|
||||||
services()
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())?
|
.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
|
||||||
.map_or(Ok(None), |s| {
|
.map_or(Ok(None), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map_err(|_| Error::bad_database("Invalid room member event in database."))
|
.map_err(|_| Error::bad_database("Invalid room member event in database."))
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
use std::{collections::HashSet, sync::Arc};
|
use std::{collections::HashSet, sync::Arc};
|
||||||
|
|
||||||
use crate::Result;
|
use crate::{service::appservice::RegistrationInfo, Result};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
events::{AnyStrippedStateEvent, AnySyncStateEvent},
|
events::{AnyStrippedStateEvent, AnySyncStateEvent},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
|
@ -22,11 +22,7 @@ pub trait Data: Send + Sync {
|
||||||
|
|
||||||
fn get_our_real_users(&self, room_id: &RoomId) -> Result<Arc<HashSet<OwnedUserId>>>;
|
fn get_our_real_users(&self, room_id: &RoomId) -> Result<Arc<HashSet<OwnedUserId>>>;
|
||||||
|
|
||||||
fn appservice_in_room(
|
fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result<bool>;
|
||||||
&self,
|
|
||||||
room_id: &RoomId,
|
|
||||||
appservice: &(String, serde_yaml::Value),
|
|
||||||
) -> Result<bool>;
|
|
||||||
|
|
||||||
/// Makes a user forget a room.
|
/// Makes a user forget a room.
|
||||||
fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>;
|
fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>;
|
||||||
|
@ -78,6 +74,7 @@ pub trait Data: Send + Sync {
|
||||||
) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>;
|
) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>;
|
||||||
|
|
||||||
/// Returns an iterator over all rooms a user was invited to.
|
/// Returns an iterator over all rooms a user was invited to.
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
fn rooms_invited<'a>(
|
fn rooms_invited<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
|
@ -96,6 +93,7 @@ pub trait Data: Send + Sync {
|
||||||
) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>>;
|
) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>>;
|
||||||
|
|
||||||
/// Returns an iterator over all rooms a user left.
|
/// Returns an iterator over all rooms a user left.
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
fn rooms_left<'a>(
|
fn rooms_left<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
|
|
|
@ -16,7 +16,7 @@ use ruma::{
|
||||||
};
|
};
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
use crate::{services, Error, Result};
|
use crate::{service::appservice::RegistrationInfo, services, Error, Result};
|
||||||
|
|
||||||
pub struct Service {
|
pub struct Service {
|
||||||
pub db: &'static dyn Data,
|
pub db: &'static dyn Data,
|
||||||
|
@ -205,7 +205,7 @@ impl Service {
|
||||||
pub fn appservice_in_room(
|
pub fn appservice_in_room(
|
||||||
&self,
|
&self,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
appservice: &(String, serde_yaml::Value),
|
appservice: &RegistrationInfo,
|
||||||
) -> Result<bool> {
|
) -> Result<bool> {
|
||||||
self.db.appservice_in_room(room_id, appservice)
|
self.db.appservice_in_room(room_id, appservice)
|
||||||
}
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue