Compare commits

3 commits: next...Nyaaori/re (the fork branch name is truncated in this capture)

| Author | SHA1 | Date |
|---|---|---|
| | 9d4f2884e1 | |
| | ccc5030896 | |
| | e8d435c541 | |

164 changed files with 7395 additions and 14579 deletions

.editorconfig (deleted; the file name was not captured, but the content identifies it)

```diff
@@ -1,15 +0,0 @@
-# EditorConfig is awesome: https://EditorConfig.org
-
-root = true
-
-[*]
-charset = utf-8
-end_of_line = lf
-tab_width = 4
-indent_size = 4
-indent_style = space
-insert_final_newline = true
-max_line_length = 120
-
-[*.nix]
-indent_size = 2
```

.envrc (4 lines changed)

```diff
@@ -1,5 +1 @@
-#!/usr/bin/env bash
-
 use flake
-
-PATH_add bin
```

.gitignore (vendored, 7 lines changed)

```diff
@@ -61,16 +61,9 @@ conduit.db

 # Etc.
 **/*.rs.bk
-cached_target

 # Nix artifacts
 /result*

 # Direnv cache
 /.direnv
-
-# Gitlab CI cache
-/.gitlab-ci.d
-
-# mdbook output
-public/
```

.gitlab-ci.yml (417 lines changed; YAML indentation restored as part of reconstructing the flattened two-column diff)

```diff
@@ -1,197 +1,238 @@
 stages:
-  - ci
-  - artifacts
-  - publish
+  - build
+  - build docker image
+  - test
+  - upload artifacts

 variables:
-  # Makes some things print in color
-  TERM: ansi
-  # Faster cache and artifact compression / decompression
-  FF_USE_FASTZIP: true
-  # Print progress reports for cache and artifact transfers
-  TRANSFER_METER_FREQUENCY: 5s
+  # Make GitLab CI go fast:
+  GIT_SUBMODULE_STRATEGY: recursive
+  FF_USE_FASTZIP: 1
+  CACHE_COMPRESSION_LEVEL: fastest
+
+# --------------------------------------------------------------------- #
+#                  Create and publish docker image                      #
+# --------------------------------------------------------------------- #
+
+.docker-shared-settings:
+  stage: "build docker image"
+  image: jdrouet/docker-with-buildx:20.10.21-0.9.1
+  needs: []
+  tags: ["docker"]
+  variables:
+    # Docker in Docker:
+    DOCKER_HOST: tcp://docker:2375/
+    DOCKER_TLS_CERTDIR: ""
+    DOCKER_DRIVER: overlay2
+  services:
+    - docker:dind
+  script:
+    - apk add openssh-client
+    - eval $(ssh-agent -s)
+    - mkdir -p ~/.ssh && chmod 700 ~/.ssh
+    - printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config
+    - sh .gitlab/setup-buildx-remote-builders.sh
+    # Authorize against this project's own image registry:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    # Build multiplatform image and push to temporary tag:
+    - >
+      docker buildx build
+      --platform "linux/arm/v7,linux/arm64,linux/amd64"
+      --pull
+      --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
+      --push
+      --file "Dockerfile" .
+    # Build multiplatform image to deb stage and extract their .deb files:
+    - >
+      docker buildx build
+      --platform "linux/arm/v7,linux/arm64,linux/amd64"
+      --target "packager-result"
+      --output="type=local,dest=/tmp/build-output"
+      --file "Dockerfile" .
+    # Build multiplatform image to binary stage and extract their binaries:
+    - >
+      docker buildx build
+      --platform "linux/arm/v7,linux/arm64,linux/amd64"
+      --target "builder-result"
+      --output="type=local,dest=/tmp/build-output"
+      --file "Dockerfile" .
+    # Copy to GitLab container registry:
+    - >
+      docker buildx imagetools create
+      --tag "$CI_REGISTRY_IMAGE/$TAG"
+      --tag "$CI_REGISTRY_IMAGE/$TAG-bullseye"
+      --tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
+      "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
+    # if DockerHub credentials exist, also copy to dockerhub:
+    - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
+    - >
+      if [ -n "${DOCKER_HUB}" ]; then
+      docker buildx imagetools create
+      --tag "$DOCKER_HUB_IMAGE/$TAG"
+      --tag "$DOCKER_HUB_IMAGE/$TAG-bullseye"
+      --tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
+      "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
+      ; fi
+    - mv /tmp/build-output ./
+  artifacts:
+    paths:
+      - "./build-output/"
+
+docker:next:
+  extends: .docker-shared-settings
+  rules:
+    - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"'
+  variables:
+    TAG: "matrix-conduit:next"
+
+docker:master:
+  extends: .docker-shared-settings
+  rules:
+    - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"'
+  variables:
+    TAG: "matrix-conduit:latest"
+
+docker:tags:
+  extends: .docker-shared-settings
+  rules:
+    - if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG"
+  variables:
+    TAG: "matrix-conduit:$CI_COMMIT_TAG"
+
+
+# --------------------------------------------------------------------- #
+#                              Run tests                                #
+# --------------------------------------------------------------------- #
+
+cargo check:
+  stage: test
+  image: docker.io/rust:1.64.0-bullseye
+  needs: []
+  interruptible: true
+  before_script:
+    - "rustup show && rustc --version && cargo --version" # Print version info for debugging
+    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
+  script:
+    - cargo check
+
+
+.test-shared-settings:
+  stage: "test"
+  needs: []
+  image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
+  tags: ["docker"]
+  variables:
+    CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
+  interruptible: true
+
+test:cargo:
+  extends: .test-shared-settings
+  before_script:
+    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
+  script:
+    - rustc --version && cargo --version # Print version info for debugging
+    - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml"
+  artifacts:
+    when: always
+    reports:
+      junit: report.xml
+
+test:clippy:
+  extends: .test-shared-settings
+  allow_failure: true
+  before_script:
+    - rustup component add clippy
+    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
+  script:
+    - rustc --version && cargo --version # Print version info for debugging
+    - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
+  artifacts:
+    when: always
+    reports:
+      codequality: gl-code-quality-report.json
+
+test:format:
+  extends: .test-shared-settings
+  before_script:
+    - rustup component add rustfmt
+  script:
+    - cargo fmt --all -- --check
+
+test:audit:
+  extends: .test-shared-settings
+  allow_failure: true
+  script:
+    - cargo audit --color always || true
+    - cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json
+  artifacts:
+    when: always
+    reports:
+      sast: gl-sast-report.json
+
+test:dockerlint:
+  stage: "test"
+  needs: []
+  image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine
+  interruptible: true
+  script:
+    - hadolint --version
+    # First pass: Print for CI log:
+    - >
+      hadolint
+      --no-fail --verbose
+      ./Dockerfile
+    # Then output the results into a json for GitLab to pretty-print this in the MR:
+    - >
+      hadolint
+      --format gitlab_codeclimate
+      --failure-threshold error
+      ./Dockerfile > dockerlint.json
+  artifacts:
+    when: always
+    reports:
+      codequality: dockerlint.json
+    paths:
+      - dockerlint.json
+  rules:
+    - if: '$CI_COMMIT_REF_NAME != "master"'
+      changes:
+        - docker/*Dockerfile
+        - Dockerfile
+        - .gitlab-ci.yml
+    - if: '$CI_COMMIT_REF_NAME == "master"'
+    - if: '$CI_COMMIT_REF_NAME == "next"'
+
+
+# --------------------------------------------------------------------- #
+#          Store binaries as package so they have download urls         #
+# --------------------------------------------------------------------- #
+
+# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME:
+
+#publish:package:
+#  stage: "upload artifacts"
+#  needs:
+#    - "docker:tags"
+#  rules:
+#    - if: "$CI_COMMIT_TAG"
+#  image: curlimages/curl:latest
+#  tags: ["docker"]
+#  variables:
+#    GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
+#  script:
+#    - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"'

 # Avoid duplicate pipelines
 # See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
 workflow:
   rules:
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+    - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
       when: never
-    - if: $CI
+    - if: "$CI_COMMIT_BRANCH"
+    - if: "$CI_COMMIT_TAG"
-
-before_script:
-  # Enable nix-command and flakes
-  - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
-
-  # Add our own binary cache
-  - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.conduit.rs/conduit" >> /etc/nix/nix.conf; fi
-  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ddcaWZiWm0l0IXZlO8FERRdWvEufwmd0Negl1P+c0Ns=" >> /etc/nix/nix.conf; fi
-
-  # Add alternate binary cache
-  - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
-  - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi
-
-  # Add crane binary cache
-  - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
-  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
-
-  # Add nix-community binary cache
-  - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
-  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
-
-  # Install direnv and nix-direnv
-  - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
-
-  # Allow .envrc
-  - if command -v nix > /dev/null; then direnv allow; fi
-
-  # Set CARGO_HOME to a cacheable path
-  - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
-
-  # Cache attic client
-  - if command -v nix > /dev/null; then ./bin/nix-build-and-cache --inputs-from . attic; fi
-
-ci:
-  stage: ci
-  image: nixos/nix:2.22.0
-  script:
-    # Cache the inputs required for the devShell
-    - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation
-
-    - direnv exec . engage
-  cache:
-    key: nix
-    paths:
-      - target
-      - .gitlab-ci.d
-  rules:
-    # CI on upstream runners (only available for maintainers)
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true"
-    # Manual CI on unprotected branches that are not MRs
-    - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false"
-      when: manual
-    # Manual CI on forks
-    - if: $IS_UPSTREAM_CI != "true"
-      when: manual
-    - if: $CI
-  interruptible: true
-
-artifacts:
-  stage: artifacts
-  image: nixos/nix:2.22.0
-  script:
-    - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
-    - cp result/bin/conduit x86_64-unknown-linux-musl
-
-    - mkdir -p target/release
-    - cp result/bin/conduit target/release
-    - direnv exec . cargo deb --no-build
-    - mv target/debian/*.deb x86_64-unknown-linux-musl.deb
-
-    # Since the OCI image package is based on the binary package, this has the
-    # fun side effect of uploading the normal binary too. Conduit users who are
-    # deploying with Nix can leverage this fact by adding our binary cache to
-    # their systems.
-    #
-    # Note that although we have an `oci-image-x86_64-unknown-linux-musl`
-    # output, we don't build it because it would be largely redundant to this
-    # one since it's all containerized anyway.
-    - ./bin/nix-build-and-cache .#oci-image
-    - cp result oci-image-amd64.tar.gz
-
-    - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
-    - cp result/bin/conduit aarch64-unknown-linux-musl
-
-    - mkdir -p target/aarch64-unknown-linux-musl/release
-    - cp result/bin/conduit target/aarch64-unknown-linux-musl/release
-    - direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl
-    - mv target/aarch64-unknown-linux-musl/debian/*.deb aarch64-unknown-linux-musl.deb
-
-    - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
-    - cp result oci-image-arm64v8.tar.gz
-
-    - ./bin/nix-build-and-cache .#book
-    # We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746
-    - cp -r --dereference result public
-  artifacts:
-    paths:
-      - x86_64-unknown-linux-musl
-      - aarch64-unknown-linux-musl
-      - x86_64-unknown-linux-musl.deb
-      - aarch64-unknown-linux-musl.deb
-      - oci-image-amd64.tar.gz
-      - oci-image-arm64v8.tar.gz
-      - public
-  rules:
-    # CI required for all MRs
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-    # Optional CI on forks
-    - if: $IS_UPSTREAM_CI != "true"
-      when: manual
-      allow_failure: true
-    - if: $CI
-  interruptible: true
-
-.push-oci-image:
-  stage: publish
-  image: docker:25.0.0
-  services:
-    - docker:25.0.0-dind
-  variables:
-    IMAGE_SUFFIX_AMD64: amd64
-    IMAGE_SUFFIX_ARM64V8: arm64v8
-  script:
-    - docker load -i oci-image-amd64.tar.gz
-    - IMAGE_ID_AMD64=$(docker images -q conduit:next)
-    - docker load -i oci-image-arm64v8.tar.gz
-    - IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
-    # Tag and push the architecture specific images
-    - docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
-    - docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
-    - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
-    - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
-    # Tag the multi-arch image
-    - docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
-    - docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
-    # Tag and push the git ref
-    - docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
-    - docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
-    # Tag git tags as 'latest'
-    - |
-      if [[ -n "$CI_COMMIT_TAG" ]]; then
-        docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
-        docker manifest push $IMAGE_NAME:latest
-      fi
-  dependencies:
-    - artifacts
-  only:
-    - next
-    - master
-    - tags
-
-oci-image:push-gitlab:
-  extends: .push-oci-image
-  variables:
-    IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
-  before_script:
-    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-
-oci-image:push-dockerhub:
-  extends: .push-oci-image
-  variables:
-    IMAGE_NAME: matrixconduit/matrix-conduit
-  before_script:
-    - docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD
-
-pages:
-  stage: publish
-  dependencies:
-    - artifacts
-  only:
-    - next
-  script:
-    - "true"
-  artifacts:
-    paths:
-      - public
```

Deleted file (name not captured in this view; 3 lines removed):

```diff
@@ -1,3 +0,0 @@
-# Docs: Map markdown to html files
-- source: /docs/(.+)\.md/
-  public: '\1.html'
```

Deleted file (134 lines; the content is the project's Contributor Covenant code of conduct). It read in full:

# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement over email at coc@koesters.xyz or over Matrix at @timo:conduit.rs. All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

Cargo.lock (generated, 2827 lines changed)

File diff suppressed because it is too large.

Cargo.toml (241 lines changed)

```diff
@@ -1,190 +1,111 @@
-[workspace.lints.rust]
-explicit_outlives_requirements = "warn"
-unused_qualifications = "warn"
-
-[workspace.lints.clippy]
-cloned_instead_of_copied = "warn"
-dbg_macro = "warn"
-str_to_string = "warn"
-
 [package]
-authors = ["timokoesters <timo@koesters.xyz>"]
-description = "A Matrix homeserver written in Rust"
-edition = "2021"
-homepage = "https://conduit.rs"
-license = "Apache-2.0"
 name = "conduit"
-readme = "README.md"
+description = "A Matrix homeserver written in Rust"
+license = "Apache-2.0"
+authors = ["timokoesters <timo@koesters.xyz>"]
+homepage = "https://conduit.rs"
 repository = "https://gitlab.com/famedly/conduit"
-version = "0.10.0-alpha"
-# See also `rust-toolchain.toml`
-rust-version = "1.79.0"
+readme = "README.md"
+version = "0.4.0-next"
+rust-version = "1.64"
+edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

-[lints]
-workspace = true
-
 [dependencies]
 # Web framework
-axum = { version = "0.7", default-features = false, features = [
-    "form",
-    "http1",
-    "http2",
-    "json",
-    "matched-path",
-], optional = true }
-axum-extra = { version = "0.9", features = ["typed-header"] }
-axum-server = { version = "0.6", features = ["tls-rustls"] }
-tower = { version = "0.4.13", features = ["util"] }
-tower-http = { version = "0.5", features = [
-    "add-extension",
-    "cors",
-    "sensitive-headers",
-    "trace",
-    "util",
-] }
-tower-service = "0.3"
+axum = { version = "0.5.17", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
+axum-server = { version = "0.4.0", features = ["tls-rustls"] }
+tower = { version = "0.4.8", features = ["util"] }
+tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] }
+
+# Used for matrix spec type definitions and helpers
+#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+ruma = { git = "https://github.com/ruma/ruma", rev = "af28dc8339773e5cad460289fa3c4e22d9a058cd", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
+#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }

 # Async runtime and utilities
-tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }
+tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] }
 # Used for storing data permanently
 #sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true }
 #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
-persy = { version = "1.4.4", optional = true, features = ["background_ops"] }
+persy = { version = "1.0.0", optional = true, features = ["background_ops"] }

 # Used for the http request / response body type for Ruma endpoints used with reqwest
-bytes = "1.4.0"
-http = "1"
+bytes = "1.1.0"
+http = "0.2.4"
 # Used to find data directory for default db path
-directories = "5"
+directories = "4.0.0"
 # Used for ruma wrapper
-serde_json = { version = "1.0.96", features = ["raw_value"] }
+serde_json = { version = "1.0.68", features = ["raw_value"] }
 # Used for appservice registration files
-serde_yaml = "0.9.21"
+serde_yaml = "0.9.13"
 # Used for pdu definition
-serde = { version = "1.0.163", features = ["rc"] }
+serde = { version = "1.0.130", features = ["rc"] }
 # Used for secure identifiers
-rand = "0.8.5"
+rand = "0.8.4"
 # Used to hash passwords
-rust-argon2 = "2"
+rust-argon2 = "1.0.0"
 # Used to send requests
-hyper = "1.1"
-hyper-util = { version = "0.1", features = [
-    "client",
-    "client-legacy",
-    "http1",
-    "http2",
-] }
-reqwest = { version = "0.12", default-features = false, features = [
-    "rustls-tls-native-roots",
-    "socks",
-] }
+reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
 # Used for conduit::Error type
-thiserror = "1.0.40"
+thiserror = "1.0.29"
 # Used to generate thumbnails for images
-image = { version = "0.25", default-features = false, features = [
-    "gif",
-    "jpeg",
-    "png",
-] }
+image = { version = "0.24.4", default-features = false, features = ["jpeg", "png", "gif"] }
 # Used to encode server public key
-base64 = "0.22"
+base64 = "0.13.0"
 # Used when hashing the state
-ring = "0.17.7"
+ring = "0.16.20"
 # Used when querying the SRV record of other servers
-hickory-resolver = "0.24"
+trust-dns-resolver = "0.22.0"
 # Used to find matching events for appservices
-regex = "1.8.1"
+regex = "1.5.4"
 # jwt jsonwebtokens
-jsonwebtoken = "9.2.0"
+jsonwebtoken = "8.1.1"
 # Performance measurements
-opentelemetry = "0.22"
-opentelemetry-jaeger-propagator = "0.1"
-opentelemetry-otlp = "0.15"
-opentelemetry_sdk = { version = "0.22", features = ["rt-tokio"] }
-tracing = "0.1.37"
+tracing = { version = "0.1.27", features = [] }
+tracing-subscriber = { version = "0.3.16", features = ["env-filter"] }
 tracing-flame = "0.2.0"
-tracing-opentelemetry = "0.23"
-tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
+opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
+opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
+tracing-opentelemetry = "0.18.0"
 lru-cache = "0.1.2"
+rusqlite = { version = "0.28.0", optional = true, features = ["bundled"] }
 parking_lot = { version = "0.12.1", optional = true }
-rusqlite = { version = "0.31", optional = true, features = ["bundled"] }
-# crossbeam = { version = "0.8.2", optional = true }
-num_cpus = "1.15.0"
+crossbeam = { version = "0.8.1", optional = true }
+num_cpus = "1.13.0"
 threadpool = "1.8.1"
-# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
-# Used for ruma wrapper
-serde_html_form = "0.2.0"
+heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
+rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }

-thread_local = "1.1.7"
+thread_local = "1.1.3"
 # used for TURN server authentication
 hmac = "0.12.1"
-sha-1 = "0.10.1"
+sha-1 = "0.10.0"
 # used for conduit's CLI and admin room command parsing
-clap = { version = "4.3.0", default-features = false, features = [
-    "derive",
-    "error-context",
-    "help",
-    "std",
-    "string",
-    "usage",
-] }
-futures-util = { version = "0.3.28", default-features = false }
+clap = { version = "4.0.11", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
+futures-util = { version = "0.3.17", default-features = false }
 # Used for reading the configuration from conduit.toml & environment variables
-figment = { version = "0.10.8", features = ["env", "toml"] }
+figment = { version = "0.10.6", features = ["env", "toml"] }

-# Validating urls in config
-url = { version = "2", features = ["serde"] }
-async-trait = "0.1.68"
-tikv-jemallocator = { version = "0.5.0", features = [
-    "unprefixed_malloc_on_supported_platforms",
-], optional = true }
+tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true }
+tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
+lazy_static = "1.4.0"
+async-trait = "0.1.57"

 sd-notify = { version = "0.4.1", optional = true }

-# Used for matrix spec type definitions and helpers
-[dependencies.ruma]
-features = [
-    "appservice-api-c",
-    "client-api",
-    "compat",
-    "federation-api",
-    "push-gateway-api-c",
-    "rand",
-    "ring-compat",
-    "server-util",
-    "state-res",
-    "unstable-exhaustive-types",
-    "unstable-msc2448",
-    "unstable-msc3575",
-    "unstable-unspecified",
-]
-git = "https://github.com/ruma/ruma"
-
-[dependencies.rocksdb]
-features = ["lz4", "multi-threaded-cf", "zstd"]
-optional = true
-package = "rust-rocksdb"
-version = "0.25"
-
-[target.'cfg(unix)'.dependencies]
-nix = { version = "0.28", features = ["resource"] }
-
 [features]
-default = ["backend_rocksdb", "backend_sqlite", "conduit_bin", "systemd"]
+default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc", "systemd"]
 #backend_sled = ["sled"]
-backend_persy = ["parking_lot", "persy"]
+backend_persy = ["persy", "parking_lot"]
 backend_sqlite = ["sqlite"]
-#backend_heed = ["heed", "crossbeam"]
+backend_heed = ["heed", "crossbeam"]
 backend_rocksdb = ["rocksdb"]
+jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
+sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
 conduit_bin = ["axum"]
-jemalloc = ["tikv-jemallocator"]
-sqlite = ["parking_lot", "rusqlite", "tokio/signal"]
 systemd = ["sd-notify"]

 [[bin]]
@@ -197,45 +118,35 @@ name = "conduit"
 path = "src/lib.rs"

 [package.metadata.deb]
-assets = [
-    [
-        "README.md",
-        "usr/share/doc/matrix-conduit/",
-        "644",
-    ],
-    [
-        "debian/README.md",
-        "usr/share/doc/matrix-conduit/README.Debian",
-        "644",
-    ],
-    [
-        "target/release/conduit",
-        "usr/sbin/matrix-conduit",
-        "755",
-    ],
-]
-conf-files = ["/etc/matrix-conduit/conduit.toml"]
+name = "matrix-conduit"
+maintainer = "Paul van Tilburg <paul@luon.net>"
 copyright = "2020, Timo Kösters <timo@koesters.xyz>"
+license-file = ["LICENSE", "3"]
 depends = "$auto, ca-certificates"
 extended-description = """\
 A fast Matrix homeserver that is optimized for smaller, personal servers, \
 instead of a server that has high scalability."""
-license-file = ["LICENSE", "3"]
-maintainer = "Paul van Tilburg <paul@luon.net>"
-maintainer-scripts = "debian/"
-name = "matrix-conduit"
-priority = "optional"
 section = "net"
+priority = "optional"
+assets = [
+    ["debian/README.Debian", "usr/share/doc/matrix-conduit/", "644"],
+    ["README.md", "usr/share/doc/matrix-conduit/", "644"],
+    ["target/release/conduit", "usr/sbin/matrix-conduit", "755"],
+]
+conf-files = [
+    "/etc/matrix-conduit/conduit.toml"
+]
+maintainer-scripts = "debian/"
 systemd-units = { unit-name = "matrix-conduit" }

 [profile.dev]
-incremental = true
 lto = 'off'
+incremental = true

 [profile.release]
-codegen-units = 32
-incremental = true
 lto = 'thin'
+incremental = true
+codegen-units=32
 # If you want to make flamegraphs, enable debug info:
 # debug = true
```

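A note on the `[package.metadata.deb]` table that appears on both sides of this diff: it drives `cargo deb`, the Debian packaging step the CI above runs with `cargo deb --no-build`. A minimal local sketch of how that metadata gets consumed (assuming `cargo-deb` is installed; not a prescribed workflow from this repo):

```bash
# Install the packaging helper once
cargo install cargo-deb

# Build the release binary first, then package it into a .deb using the
# [package.metadata.deb] settings (assets, conf-files, systemd units, ...)
cargo build --release
cargo deb --no-build
# The resulting package appears under target/debian/
```
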
Cross.toml (new file, +23 lines)

```toml
[build.env]
# CI uses an S3 endpoint to store sccache artifacts, so their config needs to
# be available in the cross container as well
passthrough = [
    "RUSTC_WRAPPER",
    "AWS_ACCESS_KEY_ID",
    "AWS_SECRET_ACCESS_KEY",
    "SCCACHE_BUCKET",
    "SCCACHE_ENDPOINT",
    "SCCACHE_S3_USE_SSL",
]

[target.aarch64-unknown-linux-musl]
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest"

[target.arm-unknown-linux-musleabihf]
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest"

[target.armv7-unknown-linux-musleabihf]
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest"

[target.x86_64-unknown-linux-musl]
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl@sha256:b6d689e42f0236c8a38b961bca2a12086018b85ed20e0826310421daf182e2bb"
```

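For orientation: this file only takes effect when building with `cross`, which picks the per-target image and forwards the listed environment variables into the build container. A minimal sketch of an invocation (the bucket name and endpoint below are hypothetical placeholders, not values from this repo):

```bash
# Hypothetical sccache/S3 configuration, for illustration only
export RUSTC_WRAPPER=sccache
export SCCACHE_BUCKET=my-cache-bucket
export SCCACHE_ENDPOINT=https://s3.example.com
export SCCACHE_S3_USE_SSL=true

# cross reads Cross.toml, selects the matching [target.*] image, and
# passes the variables in [build.env].passthrough into the container
cross build --release --target x86_64-unknown-linux-musl
```
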
DEPLOY.md (new file, +311 lines)

# Deploying Conduit

> ## Getting help
>
> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us
> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).

## Installing Conduit

Although you might be able to compile Conduit for Windows, we recommend running it on a Linux server; we therefore
only offer Linux binaries.

You may simply download the binary that fits your machine. Run `uname -m` to see what you need, then copy the right URL:

| CPU Architecture | Download stable version | Download development version |
| --- | --- | --- |
| x86_64 / amd64 (most servers and computers) | [Binary][x86_64-glibc-master] / [.deb][x86_64-glibc-master-deb] | [Binary][x86_64-glibc-next] / [.deb][x86_64-glibc-next-deb] |
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] |
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] |

These builds were created on, and linked against, the glibc version shipped with Debian bullseye.
If you use a system with an older glibc version, you might need to compile Conduit yourself.
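
If you are unsure which glibc your system ships, a quick generic check (not specific to Conduit):

```bash
# Prints the glibc version on most Linux distributions
ldd --version | head -n 1
```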

[x86_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
[x86_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next
[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next
[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next
[x86_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
[x86_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next
[armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next
[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next

```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
$ sudo chmod +x /usr/local/bin/matrix-conduit
```

Alternatively, you may compile the binary yourself. First install the build dependencies:

```bash
$ sudo apt install libclang-dev build-essential
```

Then build:

```bash
$ cargo build --release
```

If you want to cross compile Conduit to another architecture, read the guide below.

<details>
<summary>Cross compilation</summary>

The easiest way to compile Conduit for another platform is [cross-rs](https://github.com/cross-rs/cross), so install it first.

In order to use RocksDB as the storage backend, append `-latomic` to the linker flags.

For example, to build a binary for the Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as the compilation
target.

```bash
git clone https://gitlab.com/famedly/conduit.git
cd conduit
export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc'
cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf
```
</details>

## Adding a Conduit user

While Conduit can run as any user, it is usually better to use dedicated users for different services. This also allows
you to make sure that the file permissions are correctly set up.

In Debian you can use this command to create a Conduit user:

```bash
sudo adduser --system conduit --no-create-home
```

## Forwarding ports in the firewall or the router

Conduit uses ports 443 and 8448, both of which need to be open in the firewall.

If Conduit runs behind a router or in a container and has a different public IP address than the host system, these public ports need to be forwarded directly or indirectly to the port mentioned in the config.
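
On a plain Linux host, opening the two ports might look like this (a sketch assuming the `ufw` firewall; adjust for firewalld, nftables, or your router's port-forwarding UI):

```bash
sudo ufw allow 443/tcp    # client-server API (via the reverse proxy)
sudo ufw allow 8448/tcp   # server-server (federation) API
```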

## Setting up a systemd service

Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
server reboots. Simply paste the default systemd service you can find below into
`/etc/systemd/system/conduit.service`.

```systemd
[Unit]
Description=Conduit Matrix Server
After=network.target

[Service]
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
User=conduit
Group=nogroup
Restart=always
ExecStart=/usr/local/bin/matrix-conduit

[Install]
WantedBy=multi-user.target
```

Finally, run

```bash
$ sudo systemctl daemon-reload
```

## Creating the Conduit configuration file

Now we need to create Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
to read it. You need to change at least the server name.**
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.

```toml
[global]
# The server_name is the pretty name of this server. It is used as a suffix for user
# and room ids. Examples: matrix.org, conduit.rs

# The Conduit server needs all /_matrix/ requests to be reachable at
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).

# If that's not possible for you, you can create /.well-known files to redirect
# requests. See
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
# and
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
# for more information

# YOU NEED TO EDIT THIS
#server_name = "your.server.name"

# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/"
database_backend = "rocksdb"

# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
# 443 and 8448 will be forwarded to the Conduit instance running on this port
# Docker users: Don't change this, you'll need to map an external port to this.
port = 6167

# Max size for uploads
max_request_size = 20_000_000 # in bytes

# Enables registration. If set to false, no users can register on this server.
allow_registration = true

allow_federation = true

trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (i.e. Traefik) can reach it.
```
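
As an aside, the `/.well-known` delegation mentioned in the comments above boils down to serving a small JSON file from your base domain. A minimal sketch, assuming your existing webserver serves static files from `/var/www/html` (adapt the path and hostname to your setup):

```bash
# Tells other homeservers where to reach this server's federation API
sudo mkdir -p /var/www/html/.well-known/matrix
echo '{ "m.server": "your.server.name:443" }' \
  | sudo tee /var/www/html/.well-known/matrix/server
```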
|
||||||
|
|
||||||
|
## Setting the correct file permissions
|
||||||
|
|
||||||
|
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
|
||||||
|
Debian:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo chown -R root:root /etc/matrix-conduit
|
||||||
|
sudo chmod 755 /etc/matrix-conduit
|
||||||
|
```
|
||||||
|
|
||||||
|
If you use the default database path you also need to run this:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo mkdir -p /var/lib/matrix-conduit/
|
||||||
|
sudo chown -R conduit:nogroup /var/lib/matrix-conduit/
|
||||||
|
sudo chmod 700 /var/lib/matrix-conduit/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Setting up the Reverse Proxy
|
||||||
|
|
||||||
|
This depends on whether you use Apache, Caddy, Nginx or another web server.
|
||||||
|
|
||||||
|
### Apache
|
||||||
|
|
||||||
|
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
|
||||||
|
|
||||||
|
```apache
|
||||||
|
Listen 8448
|
||||||
|
|
||||||
|
<VirtualHost *:443 *:8448>
|
||||||
|
|
||||||
|
ServerName your.server.name # EDIT THIS
|
||||||
|
|
||||||
|
AllowEncodedSlashes NoDecode
|
||||||
|
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon
|
||||||
|
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
|
||||||
|
|
||||||
|
</VirtualHost>
|
||||||
|
```
|
||||||
|
|
||||||
|
**You need to make some edits again.** When you are done, run
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ sudo systemctl reload apache2
|
||||||
|
```
|
||||||
|
|
||||||
|
### Caddy

Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name).

```caddy
your.server.name, your.server.name:8448 {
    reverse_proxy /_matrix/* 127.0.0.1:6167
}
```

That's it! Just start or enable the service and you're set.

```bash
$ sudo systemctl enable caddy
```

### Nginx

If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`:

```nginx
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    listen 8448 ssl http2;
    listen [::]:8448 ssl http2;
    server_name your.server.name; # EDIT THIS
    merge_slashes off;

    # Nginx defaults to only allow 1MB uploads
    client_max_body_size 20M;

    location /_matrix/ {
        proxy_pass http://127.0.0.1:6167$request_uri;
        proxy_set_header Host $http_host;
        proxy_buffering off;
    }

    ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
    ssl_certificate_key /etc/letsencrypt/live/your.server.name/privkey.pem; # EDIT THIS
    ssl_trusted_certificate /etc/letsencrypt/live/your.server.name/chain.pem; # EDIT THIS
    include /etc/letsencrypt/options-ssl-nginx.conf;
}
```

**You need to make some edits again.** When you are done, run

```bash
$ sudo systemctl reload nginx
```
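
As with Apache, it can be worth validating the configuration before reloading (optional):

```bash
$ sudo nginx -t
```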
## SSL Certificate

If you chose Caddy as your web proxy, SSL certificates are handled automatically and you can skip this step.

The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this:

```bash
$ sudo certbot -d your.server.name
```

## You're done!

Now you can start Conduit with:

```bash
$ sudo systemctl start conduit
```

Set it to start automatically when your system boots with:

```bash
$ sudo systemctl enable conduit
```
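
To verify that the service actually started (and to see its most recent log lines), you can ask systemd for its status:

```bash
$ sudo systemctl status conduit
```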
## How do I know it works?

You can open <https://app.element.io>, enter your homeserver and try to register.

You can also use these commands as a quick health check.

```bash
$ curl https://your.server.name/_matrix/client/versions
$ curl https://your.server.name:8448/_matrix/client/versions
```

- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/).
- If you can register but cannot join federated rooms, check your config again and also check that port 8448 is open and forwarded correctly.
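
As a quick manual probe of the federation port, you can also query the federation version endpoint directly; a JSON response with the server name and version suggests port 8448 is reachable:

```bash
$ curl https://your.server.name:8448/_matrix/federation/v1/version
```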
# What's next?

## Audio/Video calls

For Audio/Video call functionality see the [TURN Guide](TURN.md).

## Appservices

If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md).
Dockerfile (new file, 130 lines)

@@ -0,0 +1,130 @@
# syntax=docker/dockerfile:1
FROM docker.io/rust:1.64-bullseye AS builder
WORKDIR /usr/src/conduit

# Install required packages to build Conduit and its dependencies
RUN apt-get update && \
    apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5

# == Build dependencies without our own code separately for caching ==
#
# Need a fake main.rs since Cargo refuses to build anything otherwise.
#
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
# request that would allow just dependencies to be compiled, presumably
# regardless of whether source files are available.
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
COPY Cargo.toml Cargo.lock ./
RUN cargo build --release && rm -r src

# Copy over actual Conduit sources
COPY src src

# main.rs and lib.rs need their timestamp updated for this to work correctly since
# otherwise the build with the fake main.rs from above is newer than the
# source files (COPY preserves timestamps).
#
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
RUN touch src/main.rs && touch src/lib.rs && cargo build --release


# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS builder-result
COPY --from=builder /usr/src/conduit/target/release/conduit /conduit


# ---------------------------------------------------------------------------------------------------------------
# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems:
# ---------------------------------------------------------------------------------------------------------------
FROM docker.io/rust:1.64-bullseye AS build-cargo-deb

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        dpkg \
        dpkg-dev \
        liblzma-dev

RUN cargo install cargo-deb
# => binary is in /usr/local/cargo/bin/cargo-deb


# ---------------------------------------------------------------------------------------------------------------
# Package conduit build-result into a .deb package:
# ---------------------------------------------------------------------------------------------------------------
FROM builder AS packager
WORKDIR /usr/src/conduit

COPY ./LICENSE ./LICENSE
COPY ./README.md ./README.md
COPY debian/README.Debian ./debian/
COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb

# --no-build makes cargo-deb reuse the already compiled project
RUN cargo deb --no-build
# => Package is in /usr/src/conduit/target/debian/<project_name>_<version>_<arch>.deb


# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS packager-result
COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb


# ---------------------------------------------------------------------------------------------------------------
# Stuff below this line actually ends up in the resulting docker image
# ---------------------------------------------------------------------------------------------------------------
FROM docker.io/debian:bullseye-slim AS runner

# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167

ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit

ENV CONDUIT_PORT=6167 \
    CONDUIT_ADDRESS="0.0.0.0" \
    CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
    CONDUIT_CONFIG=''
#   └─> Set no config file to do all configuration with env vars

# Conduit needs:
#   dpkg: to install conduit.deb
#   ca-certificates: for https
#   iproute2 & wget: for the healthcheck script
RUN apt-get update && apt-get -y --no-install-recommends install \
    dpkg \
    ca-certificates \
    iproute2 \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh

# Install conduit.deb:
COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/
RUN dpkg -i /srv/conduit/*.deb

# Improve security: Don't run stuff as root, that does not need to run as root
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
ARG USER_ID=1000
ARG GROUP_ID=1000
RUN set -x ; \
    groupadd -r -g ${GROUP_ID} conduit ; \
    useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1

# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable:
RUN chown -cR conduit:conduit /srv/conduit && \
    chmod +x /srv/conduit/healthcheck.sh && \
    mkdir -p ${DEFAULT_DB_PATH} && \
    chown -cR conduit:conduit ${DEFAULT_DB_PATH}

# Change user to conduit, no root permissions afterwards:
USER conduit
# Set container home directory
WORKDIR /srv/conduit

# Run Conduit and print backtraces on panics
ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/usr/sbin/matrix-conduit" ]
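#
# A minimal local usage sketch (illustrative; not part of the file above, and the
# image tag is an assumption, pick your own):
#
#   # Build the runnable image:
#   docker build --tag matrix-conduit:local .
#
#   # Optionally match the container user to your host user to ease volume mounts:
#   docker build --build-arg USER_ID=$(id -u) --build-arg GROUP_ID=$(id -g) --tag matrix-conduit:local .
#
#   # Extract the .deb from the packager stage (requires BuildKit):
#   docker build --target packager-result --output type=local,dest=./out .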
README.md

@@ -1,15 +1,7 @@
 # Conduit
-<!-- ANCHOR: catchphrase -->
 ### A Matrix homeserver written in Rust
-<!-- ANCHOR_END: catchphrase -->
-
-Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information.
-Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository.
-
-<!-- ANCHOR: body -->
 #### What is Matrix?
 [Matrix](https://matrix.org) is an open network for secure and decentralized
 communication. Users from every Matrix homeserver can chat with users from all
 other Matrix servers. You can even use bridges (also called Matrix appservices)

@@ -23,7 +15,11 @@ friends or company.

 #### Can I try it out?
-Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login.
+Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
+example) and registering on the `conduit.rs` homeserver.
+
+*Registration is currently disabled because of scammers. For an account please
+message us (see contact section below).*

 Server hosting for conduit.rs is donated by the Matrix.org Foundation.

@@ -37,32 +33,27 @@ There are still a few important features missing:

 - E2EE emoji comparison over federation (E2EE chat works)
 - Outgoing read receipts, typing, presence over federation (incoming works)
-<!-- ANCHOR_END: body -->
-
-<!-- ANCHOR: footer -->
+Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
+
+#### How can I deploy my own?
+
+- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
+- Debian package: [debian/README.Debian](debian/README.Debian)
+- Nix/NixOS: [nix/README.md](nix/README.md)
+- Docker: [docker/README.md](docker/README.md)
+
+If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).

 #### How can I contribute?

-1. Look for an issue you would like to work on and make sure no one else is currently working on it.
-2. Tell us that you are working on the issue (comment on the issue or chat in
-   [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions.
-3. Fork the repo, create a new branch and push commits.
+1. Look for an issue you would like to work on and make sure it's not assigned
+   to other users
+2. Ask someone to assign the issue to you (comment on the issue or chat in
+   [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org))
+3. Fork the repo and work on the issue.[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) is happy to help :)
 4. Submit a MR

-#### Contact
-
-If you have any questions, feel free to
-- Ask in `#conduit:fachschaften.org` on Matrix
-- Write an E-Mail to `conduit@koesters.xyz`
-- Send an direct message to `@timokoesters:fachschaften.org` on Matrix
-- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
-
-#### Security
-
-If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs)
-and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to
-[conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before
-a fix is released publically.
-
 #### Thanks to

 Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.

@@ -72,13 +63,20 @@ Thanks to the contributors to Conduit and all libraries we use, for example:

 - Ruma: A clean library for the Matrix Spec in Rust
 - axum: A modular web framework

+#### Contact
+
+If you run into any question, feel free to
+- Ask us in `#conduit:fachschaften.org` on Matrix
+- Write an E-Mail to `conduit@koesters.xyz`
+- Send an direct message to `timo@fachschaften.org` on Matrix
+- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
+
 #### Donate

-- Liberapay: <https://liberapay.com/timokoesters/>
-- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
+Liberapay: <https://liberapay.com/timokoesters/>\
+Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`

 #### Logo

-- Lightning Bolt Logo: <https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg>
-- Logo License: <https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md>
-<!-- ANCHOR_END: footer -->
+Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \
+Logo License: https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md
TURN.md

@@ -1,8 +1,8 @@
-# Setting up TURN/STUN
+# Setting up TURN/STURN

 ## General instructions

-* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/element-hq/synapse/blob/develop/docs/turn-howto.md).
+* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).

 ## Edit/Add a few settings to your existing conduit.toml

bin/complement (deleted file)

@@ -1,37 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail

# Path to Complement's source code
COMPLEMENT_SRC="$1"

# A `.jsonl` file to write test logs to
LOG_FILE="$2"

# A `.jsonl` file to write test results to
RESULTS_FILE="$3"

OCI_IMAGE="complement-conduit:dev"

env \
    -C "$(git rev-parse --show-toplevel)" \
    docker build \
        --tag "$OCI_IMAGE" \
        --file complement/Dockerfile \
        .

# It's okay (likely, even) that `go test` exits nonzero
set +o pipefail
env \
    -C "$COMPLEMENT_SRC" \
    COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
    go test -json ./tests | tee "$LOG_FILE"
set -o pipefail

# Post-process the results into an easy-to-compare format
cat "$LOG_FILE" | jq -c '
    select(
        (.Action == "pass" or .Action == "fail" or .Action == "skip")
        and .Test != null
    ) | {Action: .Action, Test: .Test}
' | sort > "$RESULTS_FILE"

(deleted file: a Nix build-and-cache helper script)

@@ -1,40 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail

# Build the installable and forward any other arguments too. Also, use
# nix-output-monitor instead if it's available.
if command -v nom &> /dev/null; then
    nom build "$@"
else
    nix build "$@"
fi

if [ ! -z ${ATTIC_TOKEN+x} ]; then
    nix run --inputs-from . attic -- \
        login \
        conduit \
        "${ATTIC_ENDPOINT:-https://attic.conduit.rs/conduit}" \
        "$ATTIC_TOKEN"

    readarray -t derivations < <(nix path-info "$@" --derivation)
    for derivation in "${derivations[@]}"; do
        cache+=(
            "$(nix-store --query --requisites --include-outputs "$derivation")"
        )
    done

    # Upload them to Attic
    #
    # Use `xargs` and a here-string because something would probably explode if
    # several thousand arguments got passed to a command at once. Hopefully no
    # store paths include a newline in them.
    (
        IFS=$'\n'
        nix shell --inputs-from . attic -c xargs \
            attic push conduit <<< "${cache[*]}"
    )
else
    echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
fi
book.toml (deleted file, 21 lines)

@@ -1,21 +0,0 @@
[book]
description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol"
language = "en"
multilingual = false
src = "docs"
title = "Conduit"

[build]
build-dir = "public"
create-missing = true

[output.html]
edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}"
git-repository-icon = "fa-git-square"
git-repository-url = "https://gitlab.com/famedly/conduit"

[output.html.search]
limit-results = 15

[output.html.code.hidelines]
json = "~"

complement/Dockerfile (deleted file)

@@ -1,45 +0,0 @@
FROM rust:1.79.0

WORKDIR /workdir

RUN apt-get update && apt-get install -y --no-install-recommends \
    libclang-dev

COPY Cargo.toml Cargo.toml
COPY Cargo.lock Cargo.lock
COPY src src
RUN cargo build --release \
    && mv target/release/conduit conduit \
    && rm -rf target

# Install caddy
RUN apt-get update \
    && apt-get install -y \
        debian-keyring \
        debian-archive-keyring \
        apt-transport-https \
        curl \
    && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
        | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
    && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
        | tee /etc/apt/sources.list.d/caddy-testing.list \
    && apt-get update \
    && apt-get install -y caddy

COPY conduit-example.toml conduit.toml
COPY complement/caddy.json caddy.json

ENV SERVER_NAME=localhost
ENV CONDUIT_CONFIG=/workdir/conduit.toml

RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml

EXPOSE 8008 8448

CMD uname -a && \
    sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \
    sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \
    caddy start --config caddy.json > /dev/null && \
    /workdir/conduit

complement/README.md (deleted file)

@@ -1,11 +0,0 @@
# Complement

## What's that?

Have a look at [its repository](https://github.com/matrix-org/complement).

## How do I use it with Conduit?

The script at [`../bin/complement`](../bin/complement) has automation for this.
It takes a few command line arguments, you can read the script to find out what
those are.
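
Reading the script, the three positional arguments are the Complement checkout, a log file and a results file, so an invocation might look like this (paths are illustrative):

```bash
$ bin/complement ~/src/complement complement.jsonl complement-results.jsonl
```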
complement/caddy.json (deleted file)

@@ -1,72 +0,0 @@
{
    "logging": {
        "logs": {
            "default": {
                "level": "WARN"
            }
        }
    },
    "apps": {
        "http": {
            "https_port": 8448,
            "servers": {
                "srv0": {
                    "listen": [":8448"],
                    "routes": [{
                        "match": [{
                            "host": ["your.server.name"]
                        }],
                        "handle": [{
                            "handler": "subroute",
                            "routes": [{
                                "handle": [{
                                    "handler": "reverse_proxy",
                                    "upstreams": [{
                                        "dial": "127.0.0.1:8008"
                                    }]
                                }]
                            }]
                        }],
                        "terminal": true
                    }],
                    "tls_connection_policies": [{
                        "match": {
                            "sni": ["your.server.name"]
                        }
                    }]
                }
            }
        },
        "pki": {
            "certificate_authorities": {
                "local": {
                    "name": "Complement CA",
                    "root": {
                        "certificate": "/complement/ca/ca.crt",
                        "private_key": "/complement/ca/ca.key"
                    },
                    "intermediate": {
                        "certificate": "/complement/ca/ca.crt",
                        "private_key": "/complement/ca/ca.key"
                    }
                }
            }
        },
        "tls": {
            "automation": {
                "policies": [{
                    "subjects": ["your.server.name"],
                    "issuers": [{
                        "module": "internal"
                    }],
                    "on_demand": true
                }, {
                    "issuers": [{
                        "module": "internal",
                        "ca": "local"
                    }]
                }]
            }
        }
    }
}

conduit-example.toml

@@ -17,14 +17,14 @@
 # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
 # and
 # https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
-# for more information, or continue below to see how conduit can do this for you.
+# for more information

 # YOU NEED TO EDIT THIS
 #server_name = "your.server.name"

-database_backend = "rocksdb"
 # This is the only directory where Conduit will save its data
 database_path = "/var/lib/matrix-conduit/"
+database_backend = "rocksdb"

 # The port Conduit will be running on. You need to set up a reverse proxy in
 # your web server (e.g. apache or nginx), so all requests to /_matrix on port

@@ -38,37 +38,15 @@ max_request_size = 20_000_000 # in bytes

 # Enables registration. If set to false, no users can register on this server.
 allow_registration = true

-# A static registration token that new users will have to provide when creating
-# an account. YOU NEED TO EDIT THIS.
-# - Insert a password that users will have to enter on registration
-# - Start the line with '#' to remove the condition
-registration_token = ""
-
-allow_check_for_updates = true
 allow_federation = true

 # Enable the display name lightning bolt on registration.
 enable_lightning_bolt = true

-# Servers listed here will be used to gather public keys of other servers.
-# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
-# support batched key requests, so this list should only contain Synapse
-# servers.)
 trusted_servers = ["matrix.org"]

 #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
-# Controls the log verbosity. See also [here][0].
-#
-# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
-#log = "..."
+#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
 #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.

-[global.well_known]
-# Conduit handles the /.well-known/matrix/* endpoints, making both clients and servers try to access conduit with the host
-# server_name and port 443 by default.
-# If you want to override these defaults, uncomment and edit the following lines accordingly:
-#server = your.server.name:443
-#client = https://your.server.name

debian/README.md → debian/README.Debian (vendored)

@@ -1,36 +1,28 @@
 Conduit for Debian
 ==================

-Installation
-------------
-
-Information about downloading, building and deploying the Debian package, see
-the "Installing Conduit" section in the Deploying docs.
-All following sections until "Setting up the Reverse Proxy" be ignored because
-this is handled automatically by the packaging.
-
 Configuration
 -------------

 When installed, Debconf generates the configuration of the homeserver
 (host)name, the address and port it listens on. This configuration ends up in
-`/etc/matrix-conduit/conduit.toml`.
+/etc/matrix-conduit/conduit.toml.

 You can tweak more detailed settings by uncommenting and setting the variables
-in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum
+in /etc/matrix-conduit/conduit.toml. This involves settings such as the maximum
 file size for download/upload, enabling federation, etc.

 Running
 -------

-The package uses the `matrix-conduit.service` systemd unit file to start and
+The package uses the matrix-conduit.service systemd unit file to start and
 stop Conduit. It loads the configuration file mentioned above to set up the
 environment before running the server.

 This package assumes by default that Conduit will be placed behind a reverse
 proxy such as Apache or nginx. This default deployment entails just listening
-on `127.0.0.1` and the free port `6167` and is reachable via a client using the URL
-<http://localhost:6167>.
+on 127.0.0.1 and the free port 6167 and is reachable via a client using the URL
+http://localhost:6167.

 At a later stage this packaging may support also setting up TLS and running
 stand-alone. In this case, however, you need to set up some certificates and

debian/postinst (vendored)

@@ -19,11 +19,11 @@ case "$1" in
 _matrix-conduit
 fi

-# Create the database path if it does not exist yet and fix up ownership
-# and permissions.
+# Create the database path if it does not exist yet.
+if [ ! -d "$CONDUIT_DATABASE_PATH" ]; then
 mkdir -p "$CONDUIT_DATABASE_PATH"
 chown _matrix-conduit "$CONDUIT_DATABASE_PATH"
-chmod 700 "$CONDUIT_DATABASE_PATH"
+fi

 if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then
 # Write the debconf values in the config.

@@ -72,30 +72,12 @@ max_request_size = 20_000_000 # in bytes

 # Enables registration. If set to false, no users can register on this server.
 allow_registration = true

-# A static registration token that new users will have to provide when creating
-# an account.
-# - Insert a password that users will have to enter on registration
-# - Start the line with '#' to remove the condition
-#registration_token = ""
-
 allow_federation = true
-allow_check_for_updates = true

-# Enable the display name lightning bolt on registration.
-enable_lightning_bolt = true

-# Servers listed here will be used to gather public keys of other servers.
-# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
-# support batched key requests, so this list should only contain Synapse
-# servers.)
 trusted_servers = ["matrix.org"]

 #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
-# Controls the log verbosity. See also [here][0].
-#
-# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
-#log = "..."
+#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
 EOF
 fi
 ;;

default.nix (deleted file)

@@ -1,10 +0,0 @@
(import
  (
    let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
    fetchTarball {
      url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
      sha256 = lock.nodes.flake-compat.locked.narHash;
    }
  )
  { src = ./.; }
).defaultNix

@@ -7,8 +7,8 @@ services:
     ### then you are ready to go.
     image: matrixconduit/matrix-conduit:latest
     ### If you want to build a fresh image from the sources, then comment the image line and uncomment the
-    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
-    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
+    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
+    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
     # build:
     #   context: .
     #   args:

@@ -26,12 +26,12 @@ services:
       CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
       CONDUIT_DATABASE_BACKEND: rocksdb
       CONDUIT_PORT: 6167
-      CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
+      CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
       CONDUIT_ALLOW_REGISTRATION: 'true'
       CONDUIT_ALLOW_FEDERATION: 'true'
-      CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
       CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
       #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
+      #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
       CONDUIT_ADDRESS: 0.0.0.0
       CONDUIT_CONFIG: '' # Ignore this
 #

docker/README.md

@@ -1,39 +1,10 @@
-# Conduit for Docker
+# Deploy using Docker

 > **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.

 ## Docker

-To run Conduit with Docker you can either build the image yourself or pull it from a registry.
-
-### Use a registry
-
-OCI images for Conduit are available in the registries listed below. We recommend using the image tagged as `latest` from GitLab's own registry.
-
-| Registry        | Image                                                           | Size                         | Notes                |
-| --------------- | --------------------------------------------------------------- | ---------------------------- | -------------------- |
-| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield-latest] | Stable image.        |
-| Docker Hub      | [docker.io/matrixconduit/matrix-conduit:latest][dh]             | ![Image Size][shield-latest] | Stable image.        |
-| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:next][gl]   | ![Image Size][shield-next]   | Development version. |
-| Docker Hub      | [docker.io/matrixconduit/matrix-conduit:next][dh]               | ![Image Size][shield-next]   | Development version. |
-
-[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
-[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937
-[shield-latest]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
-[shield-next]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/next
-
-Use
-```bash
-docker image pull <link>
-```
-to pull it to your machine.
-
-### Build using a dockerfile
+### Build & Dockerfile

 The Dockerfile provided by Conduit has two stages, each of which creates an image.

@@ -48,11 +19,9 @@ docker build --tag matrixconduit/matrix-conduit:latest .

 which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.

 ### Run

-When you have the image you can simply run it with
+After building the image you can simply run it with

 ```bash
 docker run -d -p 8448:6167 \

@@ -61,24 +30,33 @@ docker run -d -p 8448:6167 \

     -e CONDUIT_DATABASE_BACKEND="rocksdb" \
     -e CONDUIT_ALLOW_REGISTRATION=true \
     -e CONDUIT_ALLOW_FEDERATION=true \
-    -e CONDUIT_MAX_REQUEST_SIZE="20000000" \
+    -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
     -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
     -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
-    -e CONDUIT_PORT="6167" \
-    --name conduit <link>
+    -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \
+    --name conduit matrixconduit/matrix-conduit:latest
 ```

-or you can use [docker compose](#docker-compose).
+or you can skip the build step and pull the image from one of the following registries:

-The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../configuration.md).
+| Registry        | Image                                                           | Size                  |
+| --------------- | --------------------------------------------------------------- | --------------------- |
+| Docker Hub      | [matrixconduit/matrix-conduit:latest][dh]                       | ![Image Size][shield] |
+| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] |
+
+[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
+[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937
+[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
+
+The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
 You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
 to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.

 If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.

-### Docker compose
+## Docker-compose

-If the `docker run` command is not for you or your setup, you can also use one of the provided `docker compose` files.
+If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.

 Depending on your proxy setup, you can use one of the following files;
 - If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)

@@ -88,14 +66,15 @@ Depending on your proxy setup, you can use one of the following files;

 When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and
 rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want
 for your server.
-Additional info about deploying Conduit can be found [here](generic.md).
+
+Additional info about deploying Conduit can be found [here](../DEPLOY.md).

 ### Build

-To build the Conduit image with docker compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker compose with:
+To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with:

 ```bash
-docker compose up
+docker-compose up
 ```

 This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag.

@@ -105,7 +84,7 @@ This will also start the container right afterwards, so if want it to run in det

 If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with:

 ```bash
-docker compose up -d
+docker-compose up -d
 ```

 > **Note:** Don't forget to modify and adjust the compose file to your needs.

@@ -116,7 +95,7 @@ As a container user, you probably know about Traefik. It is a easy to use rever

 containerized app and services available through the web. With the two provided files,
 [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
 [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
-[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy
+[`docker-compose.override.yml`](docker-compose.override.traefik.yml), it is equally easy to deploy
 and use Conduit, with a little caveat. If you already took a look at the files, then you should have
 seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and
 loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to

@@ -127,10 +106,9 @@ With the service `well-known` we use a single `nginx` container that will serve

 So...step by step:

-1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
-   [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename.
+1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) from the repository and remove `.traefik` from the filenames.
 2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
-3. Create the `conduit.toml` config file, an example can be found [here](../configuration.md), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
+3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
 4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
 5. Create the files needed by the `well-known` service.

@@ -158,60 +136,5 @@ So...step by step:

     }
 ```

-6. Run `docker compose up -d`
+6. Run `docker-compose up -d`
 7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.

-## Voice communication
-
-In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place.
-
-### Configuration
-
-Create a configuration file called `coturn.conf` containing:
-
-```conf
-use-auth-secret
-static-auth-secret=<a secret key>
-realm=<your server domain>
-```
-A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`.
-
-These same values need to be set in conduit. You can either modify conduit.toml to include these lines:
-```
-turn_uris = ["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]
-turn_secret = "<secret key from coturn configuration>"
-```
-or append the following to the docker environment variables dependig on which configuration method you used earlier:
-```yml
-CONDUIT_TURN_URIS: '["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]'
-CONDUIT_TURN_SECRET: "<secret key from coturn configuration>"
-```
-Restart Conduit to apply these changes.
-
-### Run
-Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using
-```bash
-docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
-```
-
-or docker compose. For the latter, paste the following section into a file called `docker-compose.yml`
-and run `docker compose up -d` in the same directory.
-
-```yml
-version: 3
-services:
-    turn:
-        container_name: coturn-server
-        image: docker.io/coturn/coturn
-        restart: unless-stopped
-        network_mode: "host"
-        volumes:
-            - ./coturn.conf:/etc/coturn/turnserver.conf
-```
-
-To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md.
-For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration).

@@ -7,8 +7,8 @@ services:
     ### then you are ready to go.
     image: matrixconduit/matrix-conduit:latest
     ### If you want to build a fresh image from the sources, then comment the image line and uncomment the
-    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
-    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
+    ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
+    ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
     # build:
     #   context: .
     #   args:

@@ -26,19 +26,18 @@ services:
       CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
       CONDUIT_DATABASE_BACKEND: rocksdb
       CONDUIT_PORT: 6167
-      CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
+      CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
       CONDUIT_ALLOW_REGISTRATION: 'true'
-      #CONDUIT_REGISTRATION_TOKEN: '' # require password for registration
       CONDUIT_ALLOW_FEDERATION: 'true'
-      CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
       CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
       #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
+      #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
       CONDUIT_ADDRESS: 0.0.0.0
       CONDUIT_CONFIG: '' # Ignore this

   # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
   # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
-  # and in the docker compose override file.
+  # and in the docker-compose override file.
   well-known:
     image: nginx:latest
     restart: unless-stopped

@@ -18,7 +18,7 @@ services:
   # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
   # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
-  # and in the docker compose file.
+  # and in the docker-compose file.
   well-known:
     labels:
       - "traefik.enable=true"

@@ -7,8 +7,8 @@ services:
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: matrixconduit/matrix-conduit:latest
|
image: matrixconduit/matrix-conduit:latest
|
||||||
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
|
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
|
||||||
### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
|
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
|
||||||
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
|
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
|
||||||
# build:
|
# build:
|
||||||
# context: .
|
# context: .
|
||||||
# args:
|
# args:
|
||||||
|
@ -31,18 +31,19 @@ services:
|
||||||
### Uncomment and change values as desired
|
### Uncomment and change values as desired
|
||||||
# CONDUIT_ADDRESS: 0.0.0.0
|
# CONDUIT_ADDRESS: 0.0.0.0
|
||||||
# CONDUIT_PORT: 6167
|
# CONDUIT_PORT: 6167
|
||||||
# CONDUIT_REGISTRATION_TOKEN: '' # require password for registration
|
|
||||||
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
||||||
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
||||||
# CONDUIT_ALLOW_ENCRYPTION: 'true'
|
# CONDUIT_LOG: info # default is: "warn,_=off,sled=off"
|
||||||
# CONDUIT_ALLOW_FEDERATION: 'true'
|
# CONDUIT_ALLOW_JAEGER: 'false'
|
||||||
# CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
# CONDUIT_ALLOW_ENCRYPTION: 'false'
|
||||||
|
# CONDUIT_ALLOW_FEDERATION: 'false'
|
||||||
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
||||||
# CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
|
# CONDUIT_WORKERS: 10
|
||||||
|
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
||||||
|
|
||||||
# We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
|
# We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
|
||||||
# to serve those two as static files. If you want to use a different way, delete or comment the below service, here
|
# to serve those two as static files. If you want to use a different way, delete or comment the below service, here
|
||||||
# and in the docker compose override file.
|
# and in the docker-compose override file.
|
||||||
well-known:
|
well-known:
|
||||||
image: nginx:latest
|
image: nginx:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
|
@@ -6,14 +6,9 @@ if [ -z "${CONDUIT_PORT}" ]; then
     CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*')
 fi

-# If CONDUIT_ADDRESS is not set try to get the address from the process list
-if [ -z "${CONDUIT_ADDRESS}" ]; then
-    CONDUIT_ADDRESS=$(ss -tlpn | awk -F ' +|:' '/conduit/ { print $4 }')
-fi
-
 # The actual health check.
 # We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
 # TODO: Change this to a single wget call. Do we have a config value that we can check for that?
-wget --no-verbose --tries=1 --spider "http://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
-    wget --no-verbose --tries=1 --spider "https://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
+wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
+    wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
     exit 1
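To see the verdict this script produces from the host, you can query Docker's reported health state; a minimal sketch, assuming the image wires the script up as its `HEALTHCHECK` (as the Conduit Dockerfile historically does) and that the container is named `conduit` (that name is an assumption, substitute your own):

```bash
# Show the health status Docker derived from the container's HEALTHCHECK:
# "healthy", "unhealthy", or "starting".
docker inspect --format '{{.State.Health.Status}}' conduit

# Or run the same probe the script performs, from outside the container
# (6167 is Conduit's default port in these examples).
wget --no-verbose --tries=1 --spider "http://localhost:6167/_matrix/client/versions" \
  && echo healthy
```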

docs/SUMMARY.md

@@ -1,14 +0,0 @@
# Summary

- [Introduction](introduction.md)

- [Configuration](configuration.md)
- [Delegation](delegation.md)
- [Deploying](deploying.md)
  - [Generic](deploying/generic.md)
  - [Debian](deploying/debian.md)
  - [Docker](deploying/docker.md)
  - [NixOS](deploying/nixos.md)
- [TURN](turn.md)
- [Appservices](appservices.md)
- [FAQ](faq.md)
docs/configuration.md

@@ -1,114 +0,0 @@
# Configuration

**Conduit** is configured using a TOML file. The configuration file is loaded from the path specified by the `CONDUIT_CONFIG` environment variable.

> **Note:** The configuration file is required to run Conduit. If the `CONDUIT_CONFIG` environment variable is not set, Conduit will exit with an error.

> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect

> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. Example: `CONDUIT_SERVER_NAME="example.org"`
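As a concrete illustration of that naming scheme, here is a minimal sketch of configuring Conduit purely through environment variables. The nested `CONDUIT_TLS__CERTS` name is an assumption derived mechanically from the `CONDUIT_{table_name}__{field_name}` rule stated above, not something confirmed elsewhere in this document:

```bash
# Flat fields map to CONDUIT_<FIELD_NAME>
export CONDUIT_SERVER_NAME="example.org"
export CONDUIT_PORT=6167

# Fields inside a table use a double underscore between table and field,
# e.g. [global.tls] certs -> CONDUIT_TLS__CERTS (assumed from the rule above)
export CONDUIT_TLS__CERTS="/path/to/cert.pem"

# With CONDUIT_CONFIG set to an empty string, no TOML file is read
CONDUIT_CONFIG='' /usr/local/bin/matrix-conduit
```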
Conduit's configuration file is divided into the following sections:

- [Global](#global)
  - [TLS](#tls)
  - [Proxy](#proxy)

## Global

The `global` section contains the following fields:

> **Note:** The `*` symbol indicates that the field is required, and the values in **parentheses** are the possible values

| Field | Type | Description | Default |
| --- | --- | --- | --- |
| `address` | `string` | The address to bind to | `"127.0.0.1"` |
| `port` | `integer` | The port to bind to | `8000` |
| `tls` | `table` | See the [TLS configuration](#tls) | N/A |
| `server_name`_*_ | `string` | The server name | N/A |
| `database_backend`_*_ | `string` | The database backend to use (`"rocksdb"` *recommended*, `"sqlite"`) | N/A |
| `database_path`_*_ | `string` | The path to the database file/dir | N/A |
| `db_cache_capacity_mb` | `float` | The cache capacity, in MB | `300.0` |
| `enable_lightning_bolt` | `boolean` | Add `⚡️` emoji to end of user's display name | `true` |
| `allow_check_for_updates` | `boolean` | Allow Conduit to check for updates | `true` |
| `conduit_cache_capacity_modifier` | `float` | The value to multiply the default cache capacity by | `1.0` |
| `rocksdb_max_open_files` | `integer` | The maximum number of open files | `1000` |
| `pdu_cache_capacity` | `integer` | The maximum number of Persisted Data Units (PDUs) to cache | `150000` |
| `cleanup_second_interval` | `integer` | How often conduit should clean up the database, in seconds | `60` |
| `max_request_size` | `integer` | The maximum request size, in bytes | `20971520` (20 MiB) |
| `max_concurrent_requests` | `integer` | The maximum number of concurrent requests | `100` |
| `max_fetch_prev_events` | `integer` | The maximum number of previous events to fetch per request if conduit notices events are missing | `100` |
| `allow_registration` | `boolean` | Opens your homeserver to public registration | `false` |
| `registration_token` | `string` | The token users need to have when registering to your homeserver | N/A |
| `allow_encryption` | `boolean` | Allow users to enable encryption in their rooms | `true` |
| `allow_federation` | `boolean` | Allow federation with other servers | `true` |
| `allow_room_creation` | `boolean` | Allow users to create rooms | `true` |
| `allow_unstable_room_versions` | `boolean` | Allow users to create and join rooms with unstable versions | `true` |
| `default_room_version` | `string` | The default room version (`"6"`-`"10"`) | `"10"` |
| `allow_jaeger` | `boolean` | Allow Jaeger tracing | `false` |
| `tracing_flame` | `boolean` | Enable flame tracing | `false` |
| `proxy` | `table` | See the [Proxy configuration](#proxy) | N/A |
| `jwt_secret` | `string` | The secret used in the JWT to enable JWT login; without it a 400 error will be returned | N/A |
| `trusted_servers` | `array` | The list of trusted servers to gather public keys of offline servers | `["matrix.org"]` |
| `log` | `string` | The log verbosity to use | `"warn"` |
| `turn_username` | `string` | The TURN username | `""` |
| `turn_password` | `string` | The TURN password | `""` |
| `turn_uris` | `array` | The TURN URIs | `[]` |
| `turn_secret` | `string` | The TURN secret | `""` |
| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` |
| `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A |
| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |

### TLS
The `tls` table contains the following fields:
- `certs`: The path to the public PEM certificate
- `key`: The path to the PEM private key

#### Example
```toml
[global.tls]
certs = "/path/to/cert.pem"
key = "/path/to/key.pem"
```

### Proxy
You can choose what requests conduit should proxy (if any). The `proxy` table contains the following fields:

#### Global
The global option will proxy all outgoing requests. The `global` table contains the following fields:
- `url`: The URL of the proxy server

##### Example
```toml
[global.proxy.global]
url = "https://example.com"
```

#### By domain
An array of tables that contain the following fields:
- `url`: The URL of the proxy server
- `include`: Domains that should be proxied (assumed to be `["*"]` if unset)
- `exclude`: Domains that should not be proxied (takes precedence over `include`)

Both `include` and `exclude` allow for glob pattern matching.

##### Example
In this example, all requests to domains ending in `.onion` and `matrix.secretly-an-onion-domain.xyz`
will be proxied via `socks5://localhost:9050`, except for domains ending in `.clearnet.onion`. You can add as many `by_domain` tables as you need.
```toml
[[global.proxy.by_domain]]
url = "socks5://localhost:9050"
include = ["*.onion", "matrix.secretly-an-onion-domain.xyz"]
exclude = ["*.clearnet.onion"]
```

### Example

> **Note:** The following example is a minimal configuration file. You should replace the values with your own.

```toml
[global]
{{#include ../conduit-example.toml:22:}}
```

docs/delegation.md

@@ -1,69 +0,0 @@
# Delegation

You can run Conduit on a separate domain than the actual server name (what shows up in user ids, aliases, etc.).
For example you can have your users have IDs such as `@foo:example.org` and have aliases like `#bar:example.org`,
while actually having Conduit hosted on the `matrix.example.org` domain. This is called delegation.

## Automatic (recommended)

Conduit has support for hosting delegation files by itself, and by default uses it to serve federation traffic on port 443.

With this method, you need to direct requests to `/.well-known/matrix/*` to Conduit in your reverse proxy.

This is only recommended if Conduit is on the same physical server as the server which serves your server name (e.g. example.org)
as servers don't always seem to cache the response, leading to slower response times otherwise, but it should also work if you
are connected to the server running Conduit using something like a VPN.

> **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration

To configure it, use the following options:

| Field | Type | Description | Default |
| --- | --- | --- | --- |
| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |

### Example

```toml
[global]
well_known_client = "https://matrix.example.org"
well_known_server = "matrix.example.org:443"
```

## Manual

Alternatively you can serve static JSON files to inform clients and servers how to connect to Conduit.

### Servers

For servers to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/server`:

```json
{
  "m.server": "matrix.example.org:443"
}
```
Where `matrix.example.org` is the domain and `443` is the port Conduit is accessible at.

### Clients

For clients to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/client`:
```json
{
  "m.homeserver": {
    "base_url": "https://matrix.example.org"
  }
}
```
Where `matrix.example.org` is the URL Conduit is accessible at.

To ensure that all clients can access this endpoint, it is recommended you set the following headers for this endpoint:
```
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS
Access-Control-Allow-Headers: X-Requested-With, Content-Type, Authorization
```

If you also want to be able to use [sliding sync][0], look [here](faq.md#how-do-i-setup-sliding-sync).

[0]: https://matrix.org/blog/2023/09/matrix-2-0/#sliding-sync
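Once either delegation method above is in place, the two discovery endpoints can be checked from the command line; a minimal sketch, assuming the `example.org` / `matrix.example.org` names from the examples in this file:

```bash
# Server-to-server discovery: expect {"m.server": "matrix.example.org:443"}
curl -s https://example.org/.well-known/matrix/server

# Client discovery: expect the m.homeserver base_url document
curl -s https://example.org/.well-known/matrix/client

# Check that the recommended CORS header is actually sent
# (some servers may not answer HEAD requests; fall back to -sD- if so)
curl -sI https://example.org/.well-known/matrix/client | grep -i access-control-allow-origin
```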

docs/deploying.md

@@ -1,3 +0,0 @@
# Deploying

This chapter describes various ways to deploy Conduit.

docs/deploying/debian.md

@@ -1 +0,0 @@
{{#include ../../debian/README.md}}
docs/deploying/generic.md

@@ -1,289 +0,0 @@
# Generic deployment documentation

> ## Getting help
>
> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us
> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).

## Installing Conduit

Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore
only offer Linux binaries.

You may simply download the binary that fits your machine. Run `uname -m` to see what you need. For `arm`, you should use `aarch`. Now copy the appropriate url:

**Stable/Main versions:**

| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) |
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-arm64v8.tar.gz?job=artifacts) |

These builds were created on and linked against the glibc version shipped with Debian bullseye.
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.

**Latest/Next versions:**

| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=artifacts) |

```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
$ sudo chmod +x /usr/local/bin/matrix-conduit
```

Alternatively, you may compile the binary yourself. First, install any dependencies:

```bash
# Debian
$ sudo apt install libclang-dev build-essential

# RHEL
$ sudo dnf install clang
```
Then, `cd` into the source tree of conduit-next and run:
```bash
$ cargo build --release
```

## Adding a Conduit user

While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
you to make sure that the file permissions are correctly set up.

In Debian or RHEL, you can use this command to create a Conduit user:

```bash
sudo adduser --system conduit --group --disabled-login --no-create-home
```

## Forwarding ports in the firewall or the router

Conduit uses the ports 443 and 8448 both of which need to be open in the firewall.

If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config.
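As one concrete illustration, here is how opening those two ports might look with `ufw`. This is an assumption about your setup (ufw is only one of several firewall frontends); adapt it to firewalld, nftables, or your router as needed:

```bash
# Allow HTTPS client traffic and Matrix federation traffic
sudo ufw allow 443/tcp
sudo ufw allow 8448/tcp
sudo ufw reload

# Confirm the rules took effect
sudo ufw status
```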

## Optional: Avoid port 8448

If Conduit runs behind Cloudflare reverse proxy, which doesn't support port 8448 on free plans, [delegation](https://matrix-org.github.io/synapse/latest/delegate.html) can be set up to have federation traffic routed to port 443:
```apache
# .well-known delegation on Apache
<Files "/.well-known/matrix/server">
    ErrorDocument 200 '{"m.server": "your.server.name:443"}'
    Header always set Content-Type application/json
    Header always set Access-Control-Allow-Origin *
</Files>
```
[SRV DNS record](https://spec.matrix.org/latest/server-server-api/#resolving-server-names) delegation is also [possible](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-srv-record/).

## Setting up a systemd service

Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
server reboots. Simply paste the default systemd service you can find below into
`/etc/systemd/system/conduit.service`.

```systemd
[Unit]
Description=Conduit Matrix Server
After=network.target

[Service]
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
User=conduit
Group=conduit
Restart=always
ExecStart=/usr/local/bin/matrix-conduit

[Install]
WantedBy=multi-user.target
```

Finally, run

```bash
$ sudo systemctl daemon-reload
```

## Creating the Conduit configuration file

Now we need to create the Conduit's config file in
`/etc/matrix-conduit/conduit.toml`. Paste in the contents of
[`conduit-example.toml`](../configuration.md) **and take a moment to read it.
You need to change at least the server name.**
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.

## Setting the correct file permissions

As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
Debian or RHEL:

```bash
sudo chown -R root:root /etc/matrix-conduit
sudo chmod 755 /etc/matrix-conduit
```

If you use the default database path you also need to run this:

```bash
sudo mkdir -p /var/lib/matrix-conduit/
sudo chown -R conduit:conduit /var/lib/matrix-conduit/
sudo chmod 700 /var/lib/matrix-conduit/
```

## Setting up the Reverse Proxy

This depends on whether you use Apache, Caddy, Nginx or another web server.

### Apache

Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:

```apache
# Requires mod_proxy and mod_proxy_http
#
# On Apache instance compiled from source,
# paste into httpd-ssl.conf or httpd.conf

Listen 8448

<VirtualHost *:443 *:8448>

ServerName your.server.name # EDIT THIS

AllowEncodedSlashes NoDecode
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ timeout=300 nocanon
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/

</VirtualHost>
```

**You need to make some edits again.** When you are done, run

```bash
# Debian
$ sudo systemctl reload apache2

# Installed from source
$ sudo apachectl -k graceful
```

### Caddy

Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name).

```caddy
your.server.name, your.server.name:8448 {
    reverse_proxy /_matrix/* 127.0.0.1:6167
}
```

That's it! Just start or enable the service and you're set.

```bash
$ sudo systemctl enable caddy
```

### Nginx

If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`

```nginx
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    listen 8448 ssl http2;
    listen [::]:8448 ssl http2;
    server_name your.server.name; # EDIT THIS
    merge_slashes off;

    # Nginx defaults to only allow 1MB uploads
    # Increase this to allow posting large files such as videos
    client_max_body_size 20M;

    location /_matrix/ {
        proxy_pass http://127.0.0.1:6167;
        proxy_set_header Host $http_host;
        proxy_buffering off;
        proxy_read_timeout 5m;
    }

    ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
    ssl_certificate_key /etc/letsencrypt/live/your.server.name/privkey.pem; # EDIT THIS
    ssl_trusted_certificate /etc/letsencrypt/live/your.server.name/chain.pem; # EDIT THIS
    include /etc/letsencrypt/options-ssl-nginx.conf;
}
```

**You need to make some edits again.** When you are done, run

```bash
$ sudo systemctl reload nginx
```

## SSL Certificate

If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step.

The easiest way to get an SSL certificate, if you don't have one already, is to [install](https://certbot.eff.org/instructions) `certbot` and run this:

```bash
# To use ECC for the private key,
# paste into /etc/letsencrypt/cli.ini:
# key-type = ecdsa
# elliptic-curve = secp384r1

$ sudo certbot -d your.server.name
```
[Automated renewal](https://eff-certbot.readthedocs.io/en/stable/using.html#automated-renewals) is usually preconfigured.

If using Cloudflare, configure the edge and origin certificates in the dashboard instead. In case you're already running a website on the same Apache server, you can just copy-and-paste the SSL configuration from your main virtual host on port 443 into the above-mentioned vhost.

## You're done!

Now you can start Conduit with:

```bash
$ sudo systemctl start conduit
```

Set it to start automatically when your system boots with:

```bash
$ sudo systemctl enable conduit
```

## How do I know it works?

You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your homeserver and try to register. If you are using a registration token, use [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/), as they support this feature.

You can also use these commands as a quick health check.

```bash
$ curl https://your.server.name/_matrix/client/versions

# If using port 8448
$ curl https://your.server.name:8448/_matrix/client/versions
```

- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/).
If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly.

# What's next?

## Audio/Video calls

For Audio/Video call functionality see the [TURN Guide](../turn.md).

## Appservices

If you want to set up an appservice, take a look at the [Appservice Guide](../appservices.md).
docs/deploying/nixos.md

@@ -1,18 +0,0 @@
# Conduit for NixOS

Conduit can be acquired by Nix from various places:

* The `flake.nix` at the root of the repo
* The `default.nix` at the root of the repo
* From Nixpkgs

The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so
(for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to
configure Conduit.

If you want to run the latest code, you should get Conduit from the `flake.nix`
or `default.nix` and set [`services.matrix-conduit.package`][package]
appropriately.

[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
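Before pointing `services.matrix-conduit.package` at the flake, it can be worth checking that the flake actually builds on your machine; a minimal sketch, assuming flakes are enabled:

```bash
# Build the default package from a local checkout of the repo
nix build .#default

# Or build straight from the GitLab flake reference without cloning
nix build gitlab:famedly/conduit

# The resulting store path (symlinked at ./result) is what
# services.matrix-conduit.package would ultimately point at
ls -l result/bin
```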

41 docs/faq.md

@@ -1,41 +0,0 @@
# FAQ

Here are some of the most frequently asked questions about Conduit, and their answers.

## Why do I get a `M_INCOMPATIBLE_ROOM_VERSION` error when trying to join some rooms?

Conduit doesn't support room versions 1 and 2 at all, and doesn't properly support versions 3-5 currently. You can track the progress of adding support [here](https://gitlab.com/famedly/conduit/-/issues/433).

## How do I backup my server?

Backing up your Conduit server is easy:
simply stop Conduit, make a copy or file system snapshot of the database directory, then start Conduit again.

> **Note**: When using a file system snapshot, it is not required that you stop the server, but it is still recommended as it is the safest option and should ensure your database is not left in an inconsistent state.
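Spelled out as shell commands, that procedure might look like the following; a minimal sketch assuming the systemd service and default database path from the deployment guide (the timestamped destination name is just an illustration):

```bash
# Stop the server so the database is quiescent
sudo systemctl stop conduit

# Copy the database directory (default path from the deployment guide)
sudo cp -a /var/lib/matrix-conduit "/var/backups/matrix-conduit-$(date +%F)"

# Start the server again
sudo systemctl start conduit
```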

## How do I setup sliding sync?

If you use the [automatic method for delegation](delegation.md#automatic-recommended) or just proxy `.well-known/matrix/client` to Conduit, sliding sync should work with no extra configuration.
If you don't, continue below.

You need to add a `org.matrix.msc3575.proxy` field to your `.well-known/matrix/client` response, containing a URL at which Conduit is accessible.
Here is an example:
```json
{
  "m.homeserver": {
    "base_url": "https://matrix.example.org"
  },
  "org.matrix.msc3575.proxy": {
    "url": "https://matrix.example.org"
  }
}
```

## Can I migrate from Synapse to Conduit?

Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically.
Rooms that were federated can be re-joined via the other participating servers, however media and the like may be deleted from remote servers after some time, and hence might not be recoverable.

## How do I make someone an admin?

Simply invite them to the admin room. Once joined, they can administer the server by interacting with the `@conduit:<server_name>` user.
docs/introduction.md

@@ -1,13 +0,0 @@
# Conduit

{{#include ../README.md:catchphrase}}

{{#include ../README.md:body}}

#### How can I deploy my own?

- [Deployment options](deploying.md)

If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md).

{{#include ../README.md:footer}}
79 engage.toml

@@ -1,79 +0,0 @@
interpreter = ["bash", "-euo", "pipefail", "-c"]

[[task]]
group = "versions"
name = "engage"
script = "engage --version"

[[task]]
group = "versions"
name = "rustc"
script = "rustc --version"

[[task]]
group = "versions"
name = "cargo"
script = "cargo --version"

[[task]]
group = "versions"
name = "cargo-fmt"
script = "cargo fmt --version"

[[task]]
group = "versions"
name = "rustdoc"
script = "rustdoc --version"

[[task]]
group = "versions"
name = "cargo-clippy"
script = "cargo clippy -- --version"

[[task]]
group = "versions"
name = "lychee"
script = "lychee --version"

[[task]]
group = "lints"
name = "cargo-fmt"
script = "cargo fmt --check -- --color=always"

[[task]]
group = "lints"
name = "cargo-doc"
script = """
RUSTDOCFLAGS="-D warnings" cargo doc \
    --workspace \
    --no-deps \
    --document-private-items \
    --color always
"""

[[task]]
group = "lints"
name = "cargo-clippy"
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"

[[task]]
group = "lints"
name = "taplo-fmt"
script = "taplo fmt --check --colors always"

[[task]]
group = "lints"
name = "lychee"
script = "lychee --offline docs"

[[task]]
group = "tests"
name = "cargo"
script = """
cargo test \
    --workspace \
    --all-targets \
    --color=always \
    -- \
    --color=always
"""
213 flake.lock (generated)

@@ -1,70 +1,5 @@
 {
   "nodes": {
-    "attic": {
-      "inputs": {
-        "crane": "crane",
-        "flake-compat": "flake-compat",
-        "flake-utils": "flake-utils",
-        "nixpkgs": "nixpkgs",
-        "nixpkgs-stable": "nixpkgs-stable"
-      },
-      "locked": {
-        "lastModified": 1707922053,
-        "narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=",
-        "owner": "zhaofengli",
-        "repo": "attic",
-        "rev": "6eabc3f02fae3683bffab483e614bebfcd476b21",
-        "type": "github"
-      },
-      "original": {
-        "owner": "zhaofengli",
-        "ref": "main",
-        "repo": "attic",
-        "type": "github"
-      }
-    },
-    "crane": {
-      "inputs": {
-        "nixpkgs": [
-          "attic",
-          "nixpkgs"
-        ]
-      },
-      "locked": {
-        "lastModified": 1702918879,
-        "narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
-        "owner": "ipetkov",
-        "repo": "crane",
-        "rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
-        "type": "github"
-      },
-      "original": {
-        "owner": "ipetkov",
-        "repo": "crane",
-        "type": "github"
-      }
-    },
-    "crane_2": {
-      "inputs": {
-        "nixpkgs": [
-          "nixpkgs"
-        ]
-      },
-      "locked": {
-        "lastModified": 1713721181,
-        "narHash": "sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU=",
-        "owner": "ipetkov",
-        "repo": "crane",
-        "rev": "55f4939ac59ff8f89c6a4029730a2d49ea09105f",
-        "type": "github"
-      },
-      "original": {
-        "owner": "ipetkov",
-        "ref": "master",
-        "repo": "crane",
-        "type": "github"
-      }
-    },
     "fenix": {
       "inputs": {
         "nixpkgs": [
@@ -73,11 +8,11 @@
         "rust-analyzer-src": "rust-analyzer-src"
       },
       "locked": {
-        "lastModified": 1709619709,
-        "narHash": "sha256-l6EPVJfwfelWST7qWQeP6t/TDK3HHv5uUB1b2vw4mOQ=",
+        "lastModified": 1665815894,
+        "narHash": "sha256-Vboo1L4NMGLKZKVLnOPi9OHlae7uoNyfgvyIUm+SVXE=",
         "owner": "nix-community",
         "repo": "fenix",
-        "rev": "c8943ea9e98d41325ff57d4ec14736d330b321b2",
+        "rev": "2348450241a5f945f0ba07e44ecbfac2f541d7f4",
         "type": "github"
       },
       "original": {
@@ -86,45 +21,13 @@
         "type": "github"
       }
     },
-    "flake-compat": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1673956053,
-        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
-        "type": "github"
-      },
-      "original": {
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "type": "github"
-      }
-    },
-    "flake-compat_2": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1696426674,
-        "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
-        "type": "github"
-      },
-      "original": {
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "type": "github"
-      }
-    },
     "flake-utils": {
       "locked": {
-        "lastModified": 1667395993,
-        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
+        "lastModified": 1659877975,
+        "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
+        "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
         "type": "github"
       },
       "original": {
@@ -133,106 +36,57 @@
         "type": "github"
       }
     },
-    "flake-utils_2": {
+    "naersk": {
       "inputs": {
-        "systems": "systems"
+        "nixpkgs": [
+          "nixpkgs"
+        ]
       },
       "locked": {
-        "lastModified": 1709126324,
-        "narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "d465f4819400de7c8d874d50b982301f28a84605",
+        "lastModified": 1662220400,
+        "narHash": "sha256-9o2OGQqu4xyLZP9K6kNe1pTHnyPz0Wr3raGYnr9AIgY=",
+        "owner": "nix-community",
+        "repo": "naersk",
+        "rev": "6944160c19cb591eb85bbf9b2f2768a935623ed3",
         "type": "github"
       },
       "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "type": "github"
-      }
-    },
-    "nix-filter": {
-      "locked": {
-        "lastModified": 1705332318,
-        "narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
-        "owner": "numtide",
-        "repo": "nix-filter",
-        "rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
-        "type": "github"
-      },
-      "original": {
-        "owner": "numtide",
-        "repo": "nix-filter",
+        "owner": "nix-community",
+        "repo": "naersk",
         "type": "github"
       }
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1702539185,
-        "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
+        "lastModified": 1665856037,
+        "narHash": "sha256-/RvIWnGKdTSoIq5Xc2HwPIL0TzRslzU6Rqk4Img6UNg=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
+        "rev": "c95ebc5125ffffcd431df0ad8620f0926b8125b8",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "nixpkgs-unstable",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "nixpkgs-stable": {
-      "locked": {
-        "lastModified": 1702780907,
-        "narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "ref": "nixos-23.11",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "nixpkgs_2": {
-      "locked": {
-        "lastModified": 1709479366,
-        "narHash": "sha256-n6F0n8UV6lnTZbYPl1A9q1BS0p4hduAv1mGAP17CVd0=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "b8697e57f10292a6165a20f03d2f42920dfaf973",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "ref": "nixos-unstable",
         "repo": "nixpkgs",
         "type": "github"
       }
     },
     "root": {
       "inputs": {
-        "attic": "attic",
-        "crane": "crane_2",
         "fenix": "fenix",
-        "flake-compat": "flake-compat_2",
-        "flake-utils": "flake-utils_2",
-        "nix-filter": "nix-filter",
-        "nixpkgs": "nixpkgs_2"
+        "flake-utils": "flake-utils",
+        "naersk": "naersk",
+        "nixpkgs": "nixpkgs"
       }
     },
     "rust-analyzer-src": {
       "flake": false,
       "locked": {
-        "lastModified": 1709571018,
-        "narHash": "sha256-ISFrxHxE0J5g7lDAscbK88hwaT5uewvWoma9TlFmRzM=",
+        "lastModified": 1665765556,
+        "narHash": "sha256-w9L5j0TIB5ay4aRwzGCp8mgvGsu5dVJQvbEFutwr6xE=",
        "owner": "rust-lang",
         "repo": "rust-analyzer",
-        "rev": "9f14343f9ee24f53f17492c5f9b653427e2ad15e",
+        "rev": "018b8429cf3fa9d8aed916704e41dfedeb0f4f78",
         "type": "github"
       },
       "original": {
@@ -241,21 +95,6 @@
         "repo": "rust-analyzer",
         "type": "github"
       }
-    },
-    "systems": {
-      "locked": {
-        "lastModified": 1681028828,
-        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-        "owner": "nix-systems",
-        "repo": "default",
-        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-systems",
-        "repo": "default",
-        "type": "github"
-      }
     }
   },
   "root": "root",
148 flake.nix

@@ -1,115 +1,75 @@
 {
   inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
+    nixpkgs.url = "github:NixOS/nixpkgs";
     flake-utils.url = "github:numtide/flake-utils";
-    nix-filter.url = "github:numtide/nix-filter";
-    flake-compat = {
-      url = "github:edolstra/flake-compat";
-      flake = false;
-    };

     fenix = {
       url = "github:nix-community/fenix";
       inputs.nixpkgs.follows = "nixpkgs";
     };
-    crane = {
-      url = "github:ipetkov/crane?ref=master";
+    naersk = {
+      url = "github:nix-community/naersk";
       inputs.nixpkgs.follows = "nixpkgs";
     };
-    attic.url = "github:zhaofengli/attic?ref=main";
   };

-  outputs = inputs:
+  outputs =
+    { self
+    , nixpkgs
+    , flake-utils
+    , fenix
+    , naersk
+    }: flake-utils.lib.eachDefaultSystem (system:
     let
-      # Keep sorted
-      mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
-        craneLib =
-          (inputs.crane.mkLib pkgs).overrideToolchain self.toolchain;
-
-        default = self.callPackage ./nix/pkgs/default {};
-
-        inherit inputs;
-
-        oci-image = self.callPackage ./nix/pkgs/oci-image {};
-
-        book = self.callPackage ./nix/pkgs/book {};
-
-        rocksdb =
-          let
-            version = "9.1.1";
-          in
-          pkgs.rocksdb.overrideAttrs (old: {
-            inherit version;
-            src = pkgs.fetchFromGitHub {
-              owner = "facebook";
-              repo = "rocksdb";
-              rev = "v${version}";
-              hash = "sha256-/Xf0bzNJPclH9IP80QNaABfhj4IAR5LycYET18VFCXc=";
-            };
-          });
-
-        shell = self.callPackage ./nix/shell.nix {};
-
-        # The Rust toolchain to use
-        toolchain = inputs
-          .fenix
-          .packages
-          .${pkgs.pkgsBuildHost.system}
-          .fromToolchainFile {
-            file = ./rust-toolchain.toml;
-
-            # See also `rust-toolchain.toml`
-            sha256 = "sha256-Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU=";
-          };
-      });
+      pkgs = nixpkgs.legacyPackages.${system};
+
+      # Nix-accessible `Cargo.toml`
+      cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
+
+      # The Rust toolchain to use
+      toolchain = fenix.packages.${system}.toolchainOf {
+        # Use the Rust version defined in `Cargo.toml`
+        channel = cargoToml.package.rust-version;
+
+        # This will need to be updated when `package.rust-version` is changed in
+        # `Cargo.toml`
+        sha256 = "sha256-KXx+ID0y4mg2B3LHp7IyaiMrdexF6octADnAtFIOjrY=";
+      };
+
+      builder = (pkgs.callPackage naersk {
+        inherit (toolchain) rustc cargo;
+      }).buildPackage;
     in
-    inputs.flake-utils.lib.eachDefaultSystem (system:
-      let
-        pkgs = inputs.nixpkgs.legacyPackages.${system};
-      in
-      {
-        packages = {
-          default = (mkScope pkgs).default;
-          oci-image = (mkScope pkgs).oci-image;
-          book = (mkScope pkgs).book;
-        }
-        //
-        builtins.listToAttrs
-          (builtins.concatLists
-            (builtins.map
-              (crossSystem:
-                let
-                  binaryName = "static-${crossSystem}";
-                  pkgsCrossStatic =
-                    (import inputs.nixpkgs {
-                      inherit system;
-                      crossSystem = {
-                        config = crossSystem;
-                      };
-                    }).pkgsStatic;
-                in
-                [
-                  # An output for a statically-linked binary
-                  {
-                    name = binaryName;
-                    value = (mkScope pkgsCrossStatic).default;
-                  }
-
-                  # An output for an OCI image based on that binary
-                  {
-                    name = "oci-image-${crossSystem}";
-                    value = (mkScope pkgsCrossStatic).oci-image;
-                  }
-                ]
-              )
-              [
-                "x86_64-unknown-linux-musl"
-                "aarch64-unknown-linux-musl"
-              ]
-            )
-          );
-
-        devShells.default = (mkScope pkgs).shell;
-      }
-    );
+    {
+      packages.default = builder {
+        src = ./.;
+
+        nativeBuildInputs = (with pkgs.rustPlatform; [
+          bindgenHook
+        ]);
+      };
+
+      devShells.default = pkgs.mkShell {
+        # Rust Analyzer needs to be able to find the path to default crate
+        # sources, and it can read this environment variable to do so
+        RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library";
+
+        # Development tools
+        nativeBuildInputs = (with pkgs.rustPlatform; [
+          bindgenHook
+        ]) ++ (with toolchain; [
+          cargo
+          clippy
+          rust-src
+          rustc
+          rustfmt
+        ]);
+      };
+
+      checks = {
+        packagesDefault = self.packages.${system}.default;
+        devShellsDefault = self.devShells.${system}.default;
+      };
+    });
 }
188 nix/README.md (new file)

@@ -0,0 +1,188 @@
# Conduit for Nix/NixOS

This guide assumes you have a recent version of Nix (^2.4) installed.

Since Conduit ships as a Nix flake, you'll first need to [enable
flakes][enable_flakes].

You can now use the usual Nix commands to interact with Conduit's flake. For
example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need
to provide configuration and such manually as usual).
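A few more of those "usual Nix commands", for reference; a minimal sketch, assuming flakes are enabled (the `CONDUIT_CONFIG` path is only an example):

```bash
# Build Conduit without running it; the result ends up in ./result
nix build gitlab:famedly/conduit

# Run Conduit with a config file
CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml nix run gitlab:famedly/conduit

# Enter a shell with Conduit available on PATH
nix shell gitlab:famedly/conduit
```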

If your NixOS configuration is defined as a flake, you can depend on this flake
to provide a more up-to-date version than provided by `nixpkgs`. In your flake,
add the following to your `inputs`:

```nix
conduit = {
    url = "gitlab:famedly/conduit";

    # Assuming you have an input for nixpkgs called `nixpkgs`. If you experience
    # build failures while using this, try commenting/deleting this line. This
    # will probably also require you to always build from source.
    inputs.nixpkgs.follows = "nixpkgs";
};
```

Next, make sure you're passing your flake inputs to the `specialArgs` argument
of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will
assume you've named the group `flake-inputs`.

Now you can configure Conduit and a reverse proxy for it. Add the following to
a new Nix file and include it in your configuration:

```nix
{ config
, pkgs
, flake-inputs
, ...
}:

let
  # You'll need to edit these values

  # The hostname that will appear in your user and room IDs
  server_name = "example.com";

  # The hostname that Conduit actually runs on
  #
  # This can be the same as `server_name` if you want. This is only necessary
  # when Conduit is running on a different machine than the one hosting your
  # root domain. This configuration also assumes this is all running on a single
  # machine, some tweaks will need to be made if this is not the case.
  matrix_hostname = "matrix.${server_name}";

  # An admin email for TLS certificate notifications
  admin_email = "admin@${server_name}";

  # These ones you can leave alone

  # Build a derivation that stores the content of `${server_name}/.well-known/matrix/server`
  well_known_server = pkgs.writeText "well-known-matrix-server" ''
    {
      "m.server": "${matrix_hostname}"
    }
  '';

  # Build a derivation that stores the content of `${server_name}/.well-known/matrix/client`
  well_known_client = pkgs.writeText "well-known-matrix-client" ''
    {
      "m.homeserver": {
        "base_url": "https://${matrix_hostname}"
      }
    }
  '';
in

{
  # Configure Conduit itself
  services.matrix-conduit = {
    enable = true;

    # This causes NixOS to use the flake defined in this repository instead of
    # the build of Conduit built into nixpkgs.
    package = flake-inputs.conduit.packages.${pkgs.system}.default;

    settings.global = {
      inherit server_name;
    };
  };

  # Configure automated TLS acquisition/renewal
  security.acme = {
    acceptTerms = true;
    defaults = {
      email = admin_email;
    };
  };

  # ACME data must be readable by the NGINX user
  users.users.nginx.extraGroups = [
    "acme"
  ];

  # Configure NGINX as a reverse proxy
  services.nginx = {
    enable = true;
    recommendedProxySettings = true;

    virtualHosts = {
      "${server_name}" = {
        forceSSL = true;
        enableACME = true;

        listen = [
          {
            addr = "0.0.0.0";
            port = 443;
            ssl = true;
          }
          {
            addr = "0.0.0.0";
            port = 8448;
            ssl = true;
          }
        ];

        extraConfig = ''
          merge_slashes off;
        '';
      };

      "${matrix_hostname}" = {
        forceSSL = true;
        enableACME = true;

        locations."/_matrix/" = {
          proxyPass = "http://backend_conduit$request_uri";
          proxyWebsockets = true;
          extraConfig = ''
            proxy_set_header Host $host;
            proxy_buffering off;
          '';
        };

        locations."=/.well-known/matrix/server" = {
          # Use the contents of the derivation built previously
          alias = "${well_known_server}";

          extraConfig = ''
            # Set the header since by default NGINX thinks it's just bytes
            default_type application/json;
          '';
        };

        locations."=/.well-known/matrix/client" = {
          # Use the contents of the derivation built previously
          alias = "${well_known_client}";

          extraConfig = ''
            # Set the header since by default NGINX thinks it's just bytes
            default_type application/json;

            # https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients
            add_header Access-Control-Allow-Origin "*";
          '';
        };
      };
    };

    upstreams = {
      "backend_conduit" = {
        servers = {
          "localhost:${toString config.services.matrix-conduit.settings.global.port}" = { };
        };
      };
    };
  };

  # Open firewall ports for HTTP, HTTPS, and Matrix federation
  networking.firewall.allowedTCPPorts = [ 80 443 8448 ];
  networking.firewall.allowedUDPPorts = [ 80 443 8448 ];
}
```

Now you can rebuild your system configuration and you should be good to go!

[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes

[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS
nix/pkgs/book/default.nix

@@ -1,34 +0,0 @@
# Keep sorted
{ default
, inputs
, mdbook
, stdenv
}:

stdenv.mkDerivation {
  pname = "${default.pname}-book";
  version = default.version;

  src = let filter = inputs.nix-filter.lib; in filter {
    root = inputs.self;

    # Keep sorted
    include = [
      "book.toml"
      "conduit-example.toml"
      "debian/README.md"
      "docs"
      "README.md"
    ];
  };

  nativeBuildInputs = [
    mdbook
  ];

  buildPhase = ''
    mdbook build
    mv public $out
  '';
}
nix/pkgs/default/cross-compilation-env.nix

@@ -1,100 +0,0 @@
{ lib
, pkgsBuildHost
, rust
, stdenv
}:

lib.optionalAttrs stdenv.hostPlatform.isStatic {
  ROCKSDB_STATIC = "";
}
//
{
  CARGO_BUILD_RUSTFLAGS =
    lib.concatStringsSep
      " "
      ([]
        # This disables PIE for static builds, which isn't great in terms of
        # security. Unfortunately, my hand is forced because nixpkgs'
        # `libstdc++.a` is built without `-fPIE`, which precludes us from
        # leaving PIE enabled.
        ++ lib.optionals
          stdenv.hostPlatform.isStatic
          [ "-C" "relocation-model=static" ]
        ++ lib.optionals
          (stdenv.buildPlatform.config != stdenv.hostPlatform.config)
          [ "-l" "c" ]
        ++ lib.optionals
          # This check has to match the one [here][0]. We only need to set
          # these flags when using a different linker. Don't ask me why, though,
          # because I don't know. All I know is it breaks otherwise.
          #
          # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
          (
            # Nixpkgs doesn't check for x86_64 here but we do, because I
            # observed a failure building statically for x86_64 without
            # including it here. Linkers are weird.
            (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
              && stdenv.hostPlatform.isStatic
              && !stdenv.isDarwin
              && !stdenv.cc.bintools.isLLVM
          )
          [
            "-l"
            "stdc++"
            "-L"
            "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
          ]
      );
}

# What follows is stolen from [here][0]. Its purpose is to properly configure
# compilers and linkers for various stages of the build, and even covers the
# case of build scripts that need native code compiled and run on the build
# platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
//
(
  let
    inherit (rust.lib) envVars;
  in
  lib.optionalAttrs
    (stdenv.targetPlatform.rust.rustcTarget
      != stdenv.hostPlatform.rust.rustcTarget)
    (
      let
        inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
      in
      {
        "CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
        "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
        "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
          envVars.linkerForTarget;
      }
    )
  //
  (
    let
      inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
    in
    {
      "CC_${cargoEnvVarTarget}" = envVars.ccForHost;
      "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
      "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
      CARGO_BUILD_TARGET = rustcTarget;
    }
  )
  //
  (
    let
      inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
    in
    {
      "CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
      "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
      "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
      HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
      HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
    }
  )
)
@@ -1,95 +0,0 @@
-# Dependencies (keep sorted)
-{ craneLib
-, inputs
-, lib
-, pkgsBuildHost
-, rocksdb
-, rust
-, stdenv
-
-# Options (keep sorted)
-, default-features ? true
-, features ? []
-, profile ? "release"
-}:
-
-let
-  buildDepsOnlyEnv =
-    let
-      rocksdb' = rocksdb.override {
-        enableJemalloc = builtins.elem "jemalloc" features;
-      };
-    in
-    {
-      NIX_OUTPATH_USED_AS_RANDOM_SEED = "randomseed"; # https://crane.dev/faq/rebuilds-bindgen.html
-      ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
-      ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
-    }
-    //
-    (import ./cross-compilation-env.nix {
-      # Keep sorted
-      inherit
-        lib
-        pkgsBuildHost
-        rust
-        stdenv;
-    });
-
-  buildPackageEnv = {
-    CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev;
-  } // buildDepsOnlyEnv;
-
-  commonAttrs = {
-    inherit
-      (craneLib.crateNameFromCargoToml {
-        cargoToml = "${inputs.self}/Cargo.toml";
-      })
-      pname
-      version;
-
-    src = let filter = inputs.nix-filter.lib; in filter {
-      root = inputs.self;
-
-      # Keep sorted
-      include = [
-        "Cargo.lock"
-        "Cargo.toml"
-        "src"
-      ];
-    };
-
-    nativeBuildInputs = [
-      # bindgen needs the build platform's libclang. Apparently due to "splicing
-      # weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
-      # right thing here.
-      pkgsBuildHost.rustPlatform.bindgenHook
-    ];
-
-    CARGO_PROFILE = profile;
-  };
-in
-
-craneLib.buildPackage ( commonAttrs // {
-  cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
-    env = buildDepsOnlyEnv;
-  });
-
-  cargoExtraArgs = "--locked "
-    + lib.optionalString
-      (!default-features)
-      "--no-default-features "
-    + lib.optionalString
-      (features != [])
-      "--features " + (builtins.concatStringsSep "," features);
-
-  # This is redundant with CI
-  doCheck = false;
-
-  env = buildPackageEnv;
-
-  passthru = {
-    env = buildPackageEnv;
-  };
-
-  meta.mainProgram = commonAttrs.pname;
-})
@@ -1,25 +0,0 @@
-# Keep sorted
-{ default
-, dockerTools
-, lib
-, tini
-}:
-
-dockerTools.buildImage {
-  name = default.pname;
-  tag = "next";
-  copyToRoot = [
-    dockerTools.caCertificates
-  ];
-  config = {
-    # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
-    # are handled as expected
-    Entrypoint = [
-      "${lib.getExe' tini "tini"}"
-      "--"
-    ];
-    Cmd = [
-      "${lib.getExe default}"
-    ];
-  };
-}
@@ -1,61 +0,0 @@
-# Keep sorted
-{ cargo-deb
-, default
-, engage
-, go
-, inputs
-, jq
-, lychee
-, mdbook
-, mkShell
-, olm
-, system
-, taplo
-, toolchain
-}:
-
-mkShell {
-  env = default.env // {
-    # Rust Analyzer needs to be able to find the path to default crate
-    # sources, and it can read this environment variable to do so. The
-    # `rust-src` component is required in order for this to work.
-    RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
-  };
-
-  # Development tools
-  nativeBuildInputs = [
-    # Always use nightly rustfmt because most of its options are unstable
-    #
-    # This needs to come before `toolchain` in this list, otherwise
-    # `$PATH` will have stable rustfmt instead.
-    inputs.fenix.packages.${system}.latest.rustfmt
-
-    # rust itself
-    toolchain
-
-    # CI tests
-    engage
-
-    # format toml files
-    taplo
-
-    # Needed for producing Debian packages
-    cargo-deb
-
-    # Needed for our script for Complement
-    jq
-
-    # Needed for Complement
-    go
-    olm
-
-    # Needed for our script for Complement
-    jq
-
-    # Needed for finding broken markdown links
-    lychee
-
-    # Useful for editing the book locally
-    mdbook
-  ] ++ default.nativeBuildInputs ;
-}
@@ -1,21 +0,0 @@
-# This is the authoritative configuration of this project's Rust toolchain.
-#
-# Other files that need upkeep when this changes:
-#
-# * `Cargo.toml`
-# * `flake.nix`
-#
-# Search in those files for `rust-toolchain.toml` to find the relevant places.
-# If you're having trouble making the relevant changes, bug a maintainer.
-
-[toolchain]
-channel = "1.79.0"
-components = [
-  # For rust-analyzer
-  "rust-src",
-]
-targets = [
-  "aarch64-unknown-linux-musl",
-  "x86_64-unknown-linux-gnu",
-  "x86_64-unknown-linux-musl",
-]
@@ -1,2 +1,2 @@
-imports_granularity = "Crate"
 unstable_features = true
+imports_granularity="Crate"
@@ -1,35 +1,24 @@
 use crate::{services, utils, Error, Result};
 use bytes::BytesMut;
-use ruma::api::{
-    appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken,
-};
+use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
 use std::{fmt::Debug, mem, time::Duration};
 use tracing::warn;
 
-/// Sends a request to an appservice
-///
-/// Only returns None if there is no url specified in the appservice registration file
 #[tracing::instrument(skip(request))]
-pub(crate) async fn send_request<T>(
-    registration: Registration,
+pub(crate) async fn send_request<T: OutgoingRequest>(
+    registration: serde_yaml::Value,
     request: T,
-) -> Result<Option<T::IncomingResponse>>
+) -> Result<T::IncomingResponse>
 where
-    T: OutgoingRequest + Debug,
+    T: Debug,
 {
-    let destination = match registration.url {
-        Some(url) => url,
-        None => {
-            return Ok(None);
-        }
-    };
-
-    let hs_token = registration.hs_token.as_str();
-
+    let destination = registration.get("url").unwrap().as_str().unwrap();
+    let hs_token = registration.get("hs_token").unwrap().as_str().unwrap();
     let mut http_request = request
         .try_into_http_request::<BytesMut>(
-            &destination,
-            SendAccessToken::IfRequired(hs_token),
+            destination,
+            SendAccessToken::IfRequired(""),
             &[MatrixVersion::V1_0],
         )
         .unwrap()
@@ -50,7 +39,8 @@ where
     );
     *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");
 
-    let mut reqwest_request = reqwest::Request::try_from(http_request)?;
+    let mut reqwest_request = reqwest::Request::try_from(http_request)
+        .expect("all http requests are valid reqwest requests");
 
     *reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
 
@@ -65,7 +55,9 @@ where
         Err(e) => {
             warn!(
                 "Could not send request to appservice {:?} at {}: {}",
-                registration.id, destination, e
+                registration.get("id"),
+                destination,
+                e
             );
             return Err(e.into());
         }
@@ -103,8 +95,7 @@ where
             .body(body)
             .expect("reqwest body is valid http body"),
     );
-    response.map(Some).map_err(|_| {
+    response.map_err(|_| {
        warn!(
            "Appservice returned invalid response bytes {}\n{}",
            destination, url
@@ -3,8 +3,7 @@ use crate::{api::client_server, services, utils, Error, Result, Ruma};
 use ruma::{
     api::client::{
         account::{
-            change_password, deactivate, get_3pids, get_username_availability,
-            register::{self, LoginType},
+            change_password, deactivate, get_3pids, get_username_availability, register,
             request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
             whoami, ThirdPartyIdRemovalStatus,
         },
@@ -75,9 +74,9 @@ pub async fn get_register_available_route(
 /// - Creates a new account and populates it with default account data
 /// - If `inhibit_login` is false: Creates a device and returns device id and access_token
 pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
-    if !services().globals.allow_registration().await && body.appservice_info.is_none() {
+    if !services().globals.allow_registration() && !body.from_appservice {
         return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
             "Registration has been disabled.",
         ));
     }
@@ -119,56 +118,18 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
         },
     };
 
-    if body.body.login_type == Some(LoginType::ApplicationService) {
-        if let Some(ref info) = body.appservice_info {
-            if !info.is_user_match(&user_id) {
-                return Err(Error::BadRequest(
-                    ErrorKind::Exclusive,
-                    "User is not in namespace.",
-                ));
-            }
-        } else {
-            return Err(Error::BadRequest(
-                ErrorKind::MissingToken,
-                "Missing appservice token.",
-            ));
-        }
-    } else if services().appservice.is_exclusive_user_id(&user_id).await {
-        return Err(Error::BadRequest(
-            ErrorKind::Exclusive,
-            "User id reserved by appservice.",
-        ));
-    }
-
     // UIAA
-    let mut uiaainfo;
-    let skip_auth = if services().globals.config.registration_token.is_some() {
-        // Registration token required
-        uiaainfo = UiaaInfo {
-            flows: vec![AuthFlow {
-                stages: vec![AuthType::RegistrationToken],
-            }],
-            completed: Vec::new(),
-            params: Default::default(),
-            session: None,
-            auth_error: None,
-        };
-        body.appservice_info.is_some()
-    } else {
-        // No registration token necessary, but clients must still go through the flow
-        uiaainfo = UiaaInfo {
-            flows: vec![AuthFlow {
-                stages: vec![AuthType::Dummy],
-            }],
-            completed: Vec::new(),
-            params: Default::default(),
-            session: None,
-            auth_error: None,
-        };
-        body.appservice_info.is_some() || is_guest
+    let mut uiaainfo = UiaaInfo {
+        flows: vec![AuthFlow {
+            stages: vec![AuthType::Dummy],
+        }],
+        completed: Vec::new(),
+        params: Default::default(),
+        session: None,
+        auth_error: None,
     };
 
-    if !skip_auth {
+    if !body.from_appservice {
         if let Some(auth) = &body.auth {
             let (worked, uiaainfo) = services().uiaa.try_auth(
                 &UserId::parse_with_server_name("", services().globals.server_name())
@@ -261,32 +222,21 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
     )?;
 
     info!("New user {} registered on this server.", user_id);
-    if body.appservice_info.is_none() && !is_guest {
-        services()
-            .admin
-            .send_message(RoomMessageEventContent::notice_plain(format!(
-                "New user {user_id} registered on this server."
-            )));
-    }
+    services()
+        .admin
+        .send_message(RoomMessageEventContent::notice_plain(format!(
+            "New user {user_id} registered on this server."
+        )));
 
     // If this is the first real user, grant them admin privileges
     // Note: the server user, @conduit:servername, is generated first
-    if !is_guest {
-        if let Some(admin_room) = services().admin.get_admin_room()? {
-            if services()
-                .rooms
-                .state_cache
-                .room_joined_count(&admin_room)?
-                == Some(1)
-            {
-                services()
-                    .admin
-                    .make_user_admin(&user_id, displayname)
-                    .await?;
-
-                warn!("Granting {} admin privileges as the first user", user_id);
-            }
-        }
+    if services().users.count()? == 2 {
+        services()
+            .admin
+            .make_user_admin(&user_id, displayname)
+            .await?;
+
+        warn!("Granting {} admin privileges as the first user", user_id);
     }
 
     Ok(register::v3::Response {
@@ -315,11 +265,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
 pub async fn change_password_route(
     body: Ruma<change_password::v3::Request>,
 ) -> Result<change_password::v3::Response> {
-    let sender_user = body
-        .sender_user
-        .as_ref()
-        // In the future password changes could be performed with UIA with 3PIDs, but we don't support that currently
-        .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
 
     let mut uiaainfo = UiaaInfo {
@@ -389,7 +335,7 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
     Ok(whoami::v3::Response {
         user_id: sender_user.clone(),
         device_id,
-        is_guest: services().users.is_deactivated(sender_user)? && body.appservice_info.is_none(),
+        is_guest: services().users.is_deactivated(sender_user)? && !body.from_appservice,
     })
 }
 
@@ -406,11 +352,7 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
 pub async fn deactivate_route(
     body: Ruma<deactivate::v3::Request>,
 ) -> Result<deactivate::v3::Response> {
-    let sender_user = body
-        .sender_user
-        .as_ref()
-        // In the future password changes could be performed with UIA with SSO, but we don't support that currently
-        .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
 
     let mut uiaainfo = UiaaInfo {
@@ -483,7 +425,7 @@ pub async fn request_3pid_management_token_via_email_route(
 ) -> Result<request_3pid_management_token_via_email::v3::Response> {
     Err(Error::BadRequest(
         ErrorKind::ThreepidDenied,
-        "Third party identifiers are currently unsupported by this server implementation",
+        "Third party identifier is not allowed",
     ))
 }
 
@@ -497,6 +439,6 @@ pub async fn request_3pid_management_token_via_msisdn_route(
 ) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
     Err(Error::BadRequest(
         ErrorKind::ThreepidDenied,
-        "Third party identifiers are currently unsupported by this server implementation",
+        "Third party identifier is not allowed",
     ))
 }
@@ -1,5 +1,5 @@
 use crate::{services, Error, Result, Ruma};
-use rand::seq::SliceRandom;
+use regex::Regex;
 use ruma::{
     api::{
         appservice,
@@ -18,8 +18,6 @@ use ruma::{
 pub async fn create_alias_route(
     body: Ruma<create_alias::v3::Request>,
 ) -> Result<create_alias::v3::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-
     if body.room_alias.server_name() != services().globals.server_name() {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
@@ -27,24 +25,6 @@ pub async fn create_alias_route(
         ));
     }
 
-    if let Some(ref info) = body.appservice_info {
-        if !info.aliases.is_match(body.room_alias.as_str()) {
-            return Err(Error::BadRequest(
-                ErrorKind::Exclusive,
-                "Room alias is not in namespace.",
-            ));
-        }
-    } else if services()
-        .appservice
-        .is_exclusive_alias(&body.room_alias)
-        .await
-    {
-        return Err(Error::BadRequest(
-            ErrorKind::Exclusive,
-            "Room alias reserved by appservice.",
-        ));
-    }
-
     if services()
         .rooms
         .alias
@@ -57,7 +37,7 @@ pub async fn create_alias_route(
     services()
         .rooms
         .alias
-        .set_alias(&body.room_alias, &body.room_id, sender_user)?;
+        .set_alias(&body.room_alias, &body.room_id)?;
 
     Ok(create_alias::v3::Response::new())
 }
@@ -66,12 +46,11 @@ pub async fn create_alias_route(
 ///
 /// Deletes a room alias from this server.
 ///
+/// - TODO: additional access control checks
 /// - TODO: Update canonical alias event
 pub async fn delete_alias_route(
     body: Ruma<delete_alias::v3::Request>,
 ) -> Result<delete_alias::v3::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-
     if body.room_alias.server_name() != services().globals.server_name() {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
@@ -79,28 +58,7 @@ pub async fn delete_alias_route(
         ));
     }
 
-    if let Some(ref info) = body.appservice_info {
-        if !info.aliases.is_match(body.room_alias.as_str()) {
-            return Err(Error::BadRequest(
-                ErrorKind::Exclusive,
-                "Room alias is not in namespace.",
-            ));
-        }
-    } else if services()
-        .appservice
-        .is_exclusive_alias(&body.room_alias)
-        .await
-    {
-        return Err(Error::BadRequest(
-            ErrorKind::Exclusive,
-            "Room alias reserved by appservice.",
-        ));
-    }
-
-    services()
-        .rooms
-        .alias
-        .remove_alias(&body.room_alias, sender_user)?;
+    services().rooms.alias.remove_alias(&body.room_alias)?;
 
     // TODO: update alt_aliases?
 
@@ -132,30 +90,41 @@ pub(crate) async fn get_alias_helper(
         )
         .await?;
 
-        let mut servers = response.servers;
-        servers.shuffle(&mut rand::thread_rng());
-
-        return Ok(get_alias::v3::Response::new(response.room_id, servers));
+        return Ok(get_alias::v3::Response::new(
+            response.room_id,
+            response.servers,
+        ));
     }
 
     let mut room_id = None;
     match services().rooms.alias.resolve_local_alias(&room_alias)? {
         Some(r) => room_id = Some(r),
         None => {
-            for appservice in services().appservice.read().await.values() {
-                if appservice.aliases.is_match(room_alias.as_str())
-                    && matches!(
-                        services()
-                            .sending
-                            .send_appservice_request(
-                                appservice.registration.clone(),
-                                appservice::query::query_room_alias::v1::Request {
-                                    room_alias: room_alias.clone(),
-                                },
-                            )
-                            .await,
-                        Ok(Some(_opt_result))
-                    )
+            for (_id, registration) in services().appservice.all()? {
+                let aliases = registration
+                    .get("namespaces")
+                    .and_then(|ns| ns.get("aliases"))
+                    .and_then(|aliases| aliases.as_sequence())
+                    .map_or_else(Vec::new, |aliases| {
+                        aliases
+                            .iter()
+                            .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok())
+                            .collect::<Vec<_>>()
+                    });
+
+                if aliases
+                    .iter()
+                    .any(|aliases| aliases.is_match(room_alias.as_str()))
+                    && services()
+                        .sending
+                        .send_appservice_request(
+                            registration,
+                            appservice::query::query_room_alias::v1::Request {
+                                room_alias: room_alias.clone(),
+                            },
+                        )
+                        .await
+                        .is_ok()
                 {
                     room_id = Some(
                         services()
@@ -75,7 +75,7 @@ pub async fn get_global_account_data_route(
 
     let event: Box<RawJsonValue> = services()
         .account_data
-        .get(None, sender_user, body.event_type.to_string().into())?
+        .get(None, sender_user, body.event_type.clone().into())?
         .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
 
     let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
@@ -95,7 +95,11 @@ pub async fn get_room_account_data_route(
 
     let event: Box<RawJsonValue> = services()
         .account_data
-        .get(Some(&body.room_id), sender_user, body.event_type.clone())?
+        .get(
+            Some(&body.room_id),
+            sender_user,
+            body.event_type.clone().into(),
+        )?
         .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
 
     let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
@@ -3,7 +3,7 @@ use ruma::{
     api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
     events::StateEventType,
 };
-use std::collections::HashSet;
+use std::{collections::HashSet, convert::TryFrom};
 use tracing::error;
 
 /// # `GET /_matrix/client/r0/rooms/{roomId}/context`
@@ -27,35 +27,36 @@ pub async fn get_context_route(
 
     let mut lazy_loaded = HashSet::new();
 
-    let base_token = services()
+    let base_pdu_id = services()
         .rooms
         .timeline
-        .get_pdu_count(&body.event_id)?
+        .get_pdu_id(&body.event_id)?
         .ok_or(Error::BadRequest(
             ErrorKind::NotFound,
             "Base event id not found.",
         ))?;
 
-    let base_event =
-        services()
-            .rooms
-            .timeline
-            .get_pdu(&body.event_id)?
-            .ok_or(Error::BadRequest(
-                ErrorKind::NotFound,
-                "Base event not found.",
-            ))?;
+    let base_token = services().rooms.timeline.pdu_count(&base_pdu_id)?;
+
+    let base_event = services()
+        .rooms
+        .timeline
+        .get_pdu_from_id(&base_pdu_id)?
+        .ok_or(Error::BadRequest(
+            ErrorKind::NotFound,
+            "Base event not found.",
+        ))?;
 
     let room_id = base_event.room_id.clone();
 
     if !services()
         .rooms
-        .state_accessor
-        .user_can_see_event(sender_user, &room_id, &body.event_id)?
+        .state_cache
+        .is_joined(sender_user, &room_id)?
     {
         return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
-            "You don't have permission to view this event.",
+            ErrorKind::Forbidden,
+            "You don't have permission to view this room.",
         ));
     }
 
@@ -69,24 +70,19 @@ pub async fn get_context_route(
         lazy_loaded.insert(base_event.sender.as_str().to_owned());
     }
 
-    // Use limit with maximum 100
-    let limit = u64::from(body.limit).min(100) as usize;
-
     let base_event = base_event.to_room_event();
 
     let events_before: Vec<_> = services()
         .rooms
         .timeline
         .pdus_until(sender_user, &room_id, base_token)?
-        .take(limit / 2)
+        .take(
+            u32::try_from(body.limit).map_err(|_| {
+                Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
+            })? as usize
+                / 2,
+        )
         .filter_map(|r| r.ok()) // Remove buggy events
-        .filter(|(_, pdu)| {
-            services()
-                .rooms
-                .state_accessor
-                .user_can_see_event(sender_user, &room_id, &pdu.event_id)
-                .unwrap_or(false)
-        })
         .collect();
 
     for (_, event) in &events_before {
@@ -103,8 +99,8 @@ pub async fn get_context_route(
 
     let start_token = events_before
         .last()
-        .map(|(count, _)| count.stringify())
-        .unwrap_or_else(|| base_token.stringify());
+        .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok())
+        .map(|count| count.to_string());
 
     let events_before: Vec<_> = events_before
         .into_iter()
@@ -115,15 +111,13 @@ pub async fn get_context_route(
         .rooms
         .timeline
         .pdus_after(sender_user, &room_id, base_token)?
-        .take(limit / 2)
+        .take(
+            u32::try_from(body.limit).map_err(|_| {
+                Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
+            })? as usize
+                / 2,
+        )
         .filter_map(|r| r.ok()) // Remove buggy events
-        .filter(|(_, pdu)| {
-            services()
-                .rooms
-                .state_accessor
-                .user_can_see_event(sender_user, &room_id, &pdu.event_id)
-                .unwrap_or(false)
-        })
         .collect();
 
     for (_, event) in &events_after {
@@ -159,8 +153,8 @@ pub async fn get_context_route(
 
     let end_token = events_after
         .last()
-        .map(|(count, _)| count.stringify())
-        .unwrap_or_else(|| base_token.stringify());
+        .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok())
+        .map(|count| count.to_string());
 
     let events_after: Vec<_> = events_after
         .into_iter()
@@ -197,8 +191,8 @@ pub async fn get_context_route(
     }
 
     let resp = get_context::v3::Response {
-        start: Some(start_token),
-        end: Some(end_token),
+        start: start_token,
+        end: end_token,
         events_before,
         event: Some(base_event),
         events_after,
@@ -53,7 +53,7 @@ pub async fn update_device_route(
         .get_device_metadata(sender_user, &body.device_id)?
         .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
 
-    device.display_name.clone_from(&body.display_name);
+    device.display_name = body.display_name.clone();
 
     services()
         .users
@@ -20,6 +20,7 @@ use ruma::{
             guest_access::{GuestAccess, RoomGuestAccessEventContent},
             history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
             join_rules::{JoinRule, RoomJoinRulesEventContent},
+            name::RoomNameEventContent,
             topic::RoomTopicEventContent,
         },
         StateEventType,
@@ -202,7 +203,17 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                         Error::bad_database("Invalid canonical alias event in database.")
                     })
                 })?,
-            name: services().rooms.state_accessor.get_name(&room_id)?,
+            name: services()
+                .rooms
+                .state_accessor
+                .room_state_get(&room_id, &StateEventType::RoomName, "")?
+                .map_or(Ok(None), |s| {
+                    serde_json::from_str(s.content.get())
+                        .map(|c: RoomNameEventContent| c.name)
+                        .map_err(|_| {
+                            Error::bad_database("Invalid room name event in database.")
+                        })
+                })?,
             num_joined_members: services()
                 .rooms
                 .state_cache
@@ -221,7 +232,6 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomTopicEventContent| Some(c.topic))
                         .map_err(|_| {
-                            error!("Invalid room topic event in database for room {}", room_id);
                             Error::bad_database("Invalid room topic event in database.")
                         })
                 })?,
@@ -17,11 +17,7 @@ use ruma::{
     DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
 };
 use serde_json::json;
-use std::{
-    collections::{hash_map, BTreeMap, HashMap, HashSet},
-    time::{Duration, Instant},
-};
-use tracing::debug;
+use std::collections::{BTreeMap, HashMap, HashSet};
 
 /// # `POST /_matrix/client/r0/keys/upload`
 ///
@@ -136,7 +132,6 @@ pub async fn upload_signing_keys_route(
             master_key,
             &body.self_signing_key,
             &body.user_signing_key,
-            true, // notify so that other users see the new keys
         )?;
     }
 
@@ -156,6 +151,18 @@ pub async fn upload_signatures_route(
             let key = serde_json::to_value(key)
                 .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?;
 
+            let is_signed_key = match key.get("usage") {
+                Some(usage) => usage
+                    .as_array()
+                    .map(|usage| !usage.contains(&json!("master")))
+                    .unwrap_or(false),
+                None => true,
+            };
+
+            if !is_signed_key {
+                continue;
+            }
+
             for signature in key
                 .get("signatures")
                 .ok_or(Error::BadRequest(
@@ -316,17 +323,15 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
             }
         }
 
-        if let Some(master_key) =
-            services()
-                .users
-                .get_master_key(sender_user, user_id, &allowed_signatures)?
+        if let Some(master_key) = services()
+            .users
+            .get_master_key(user_id, &allowed_signatures)?
         {
             master_keys.insert(user_id.to_owned(), master_key);
        }
-        if let Some(self_signing_key) =
-            services()
-                .users
-                .get_self_signing_key(sender_user, user_id, &allowed_signatures)?
+        if let Some(self_signing_key) = services()
+            .users
+            .get_self_signing_key(user_id, &allowed_signatures)?
        {
             self_signing_keys.insert(user_id.to_owned(), self_signing_key);
        }
@@ -339,99 +344,36 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
 
     let mut failures = BTreeMap::new();
 
-    let back_off = |id| async {
-        match services()
-            .globals
-            .bad_query_ratelimiter
-            .write()
-            .await
-            .entry(id)
-        {
-            hash_map::Entry::Vacant(e) => {
-                e.insert((Instant::now(), 1));
-            }
-            hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
-        }
-    };
-
     let mut futures: FuturesUnordered<_> = get_over_federation
         .into_iter()
         .map(|(server, vec)| async move {
-            if let Some((time, tries)) = services()
-                .globals
-                .bad_query_ratelimiter
-                .read()
-                .await
-                .get(server)
-            {
-                // Exponential backoff
-                let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
-                if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
-                    min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
-                }
-
-                if time.elapsed() < min_elapsed_duration {
-                    debug!("Backing off query from {:?}", server);
-                    return (
-                        server,
-                        Err(Error::BadServerResponse("bad query, still backing off")),
-                    );
-                }
-            }
-
             let mut device_keys_input_fed = BTreeMap::new();
             for (user_id, keys) in vec {
                 device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
             }
             (
                 server,
-                tokio::time::timeout(
-                    Duration::from_secs(25),
-                    services().sending.send_federation_request(
+                services()
+                    .sending
+                    .send_federation_request(
                         server,
                         federation::keys::get_keys::v1::Request {
                             device_keys: device_keys_input_fed,
                         },
-                    ),
-                )
-                .await
-                .map_err(|_e| Error::BadServerResponse("Query took too long")),
+                    )
+                    .await,
             )
         })
         .collect();
 
     while let Some((server, response)) = futures.next().await {
         match response {
-            Ok(Ok(response)) => {
-                for (user, masterkey) in response.master_keys {
-                    let (master_key_id, mut master_key) =
-                        services().users.parse_master_key(&user, &masterkey)?;
-
-                    if let Some(our_master_key) = services().users.get_key(
-                        &master_key_id,
-                        sender_user,
-                        &user,
-                        &allowed_signatures,
-                    )? {
-                        let (_, our_master_key) =
-                            services().users.parse_master_key(&user, &our_master_key)?;
-                        master_key.signatures.extend(our_master_key.signatures);
-                    }
-                    let json = serde_json::to_value(master_key).expect("to_value always works");
-                    let raw = serde_json::from_value(json).expect("Raw::from_value always works");
-                    services().users.add_cross_signing_keys(
-                        &user, &raw, &None, &None,
-                        false, // Dont notify. A notification would trigger another key request resulting in an endless loop
-                    )?;
-                    master_keys.insert(user, raw);
-                }
-
+            Ok(response) => {
+                master_keys.extend(response.master_keys);
                 self_signing_keys.extend(response.self_signing_keys);
                 device_keys.extend(response.device_keys);
             }
-            _ => {
-                back_off(server.to_owned()).await;
-
+            Err(_e) => {
                 failures.insert(server.to_string(), json!({}));
             }
         }
@@ -1,24 +1,10 @@
-// Unauthenticated media is deprecated
-#![allow(deprecated)]
-
-use std::time::Duration;
-
 use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
-use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE};
-use ruma::{
-    api::{
-        client::{
-            authenticated_media::{
-                get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
-            },
-            error::ErrorKind,
-            media::{self, create_content},
-        },
-        federation::authenticated_media::{self as federation_media, FileOrLocation},
-    },
-    http_headers::{ContentDisposition, ContentDispositionType},
-    media::Method,
-    ServerName, UInt,
-};
+use ruma::api::client::{
+    error::ErrorKind,
+    media::{
+        create_content, get_content, get_content_as_filename, get_content_thumbnail,
+        get_media_config,
+    },
+};
 
 const MXC_LENGTH: usize = 32;
@@ -27,20 +13,9 @@ const MXC_LENGTH: usize = 32;
 ///
 /// Returns max upload size.
 pub async fn get_media_config_route(
-    _body: Ruma<media::get_media_config::v3::Request>,
-) -> Result<media::get_media_config::v3::Response> {
-    Ok(media::get_media_config::v3::Response {
-        upload_size: services().globals.max_request_size().into(),
-    })
-}
-
-/// # `GET /_matrix/client/v1/media/config`
-///
-/// Returns max upload size.
-pub async fn get_media_config_auth_route(
-    _body: Ruma<get_media_config::v1::Request>,
-) -> Result<get_media_config::v1::Response> {
-    Ok(get_media_config::v1::Response {
+    _body: Ruma<get_media_config::v3::Request>,
+) -> Result<get_media_config::v3::Response> {
+    Ok(get_media_config::v3::Response {
         upload_size: services().globals.max_request_size().into(),
     })
 }
@@ -64,84 +39,43 @@ pub async fn create_content_route(
         .media
         .create(
             mxc.clone(),
-            Some(
-                ContentDisposition::new(ContentDispositionType::Inline)
-                    .with_filename(body.filename.clone()),
-            ),
+            body.filename
+                .as_ref()
+                .map(|filename| "inline; filename=".to_owned() + filename)
+                .as_deref(),
             body.content_type.as_deref(),
             &body.file,
         )
         .await?;
 
     Ok(create_content::v3::Response {
-        content_uri: mxc.into(),
+        content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
         blurhash: None,
     })
 }
 
 pub async fn get_remote_content(
     mxc: &str,
-    server_name: &ServerName,
+    server_name: &ruma::ServerName,
     media_id: String,
-) -> Result<get_content::v1::Response, Error> {
-    let content_response = match services()
+) -> Result<get_content::v3::Response, Error> {
+    let content_response = services()
         .sending
         .send_federation_request(
             server_name,
-            federation_media::get_content::v1::Request {
-                media_id: media_id.clone(),
-                timeout_ms: Duration::from_secs(20),
+            get_content::v3::Request {
+                allow_remote: false,
+                server_name: server_name.to_owned(),
+                media_id,
             },
         )
-        .await
-    {
-        Ok(federation_media::get_content::v1::Response {
-            metadata: _,
-            content: FileOrLocation::File(content),
-        }) => get_content::v1::Response {
-            file: content.file,
-            content_type: content.content_type,
-            content_disposition: content.content_disposition,
-        },
-
-        Ok(federation_media::get_content::v1::Response {
-            metadata: _,
-            content: FileOrLocation::Location(url),
-        }) => get_location_content(url).await?,
-        Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
-            let media::get_content::v3::Response {
-                file,
-                content_type,
-                content_disposition,
-                ..
-            } = services()
-                .sending
-                .send_federation_request(
-                    server_name,
-                    media::get_content::v3::Request {
-                        server_name: server_name.to_owned(),
-                        media_id,
-                        timeout_ms: Duration::from_secs(20),
-                        allow_remote: false,
-                        allow_redirect: true,
-                    },
-                )
-                .await?;
-
-            get_content::v1::Response {
-                file,
-                content_type,
-                content_disposition,
-            }
-        }
-        Err(e) => return Err(e),
-    };
+        .await?;
 
     services()
         .media
         .create(
             mxc.to_owned(),
-            content_response.content_disposition.clone(),
+            content_response.content_disposition.as_deref(),
            content_response.content_type.as_deref(),
            &content_response.file,
        )
@@ -156,58 +90,26 @@ pub async fn get_remote_content(
 ///
 /// - Only allows federation if `allow_remote` is true
 pub async fn get_content_route(
-    body: Ruma<media::get_content::v3::Request>,
-) -> Result<media::get_content::v3::Response> {
-    let get_content::v1::Response {
-        file,
-        content_disposition,
-        content_type,
-    } = get_content(&body.server_name, body.media_id.clone(), body.allow_remote).await?;
-
-    Ok(media::get_content::v3::Response {
-        file,
-        content_type,
-        content_disposition,
-        cross_origin_resource_policy: Some("cross-origin".to_owned()),
-    })
-}
-
-/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
-///
-/// Load media from our server or over federation.
-pub async fn get_content_auth_route(
-    body: Ruma<get_content::v1::Request>,
-) -> Result<get_content::v1::Response> {
-    get_content(&body.server_name, body.media_id.clone(), true).await
-}
-
-async fn get_content(
-    server_name: &ServerName,
-    media_id: String,
-    allow_remote: bool,
-) -> Result<get_content::v1::Response, Error> {
-    let mxc = format!("mxc://{}/{}", server_name, media_id);
-
-    if let Ok(Some(FileMeta {
+    body: Ruma<get_content::v3::Request>,
+) -> Result<get_content::v3::Response> {
+    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+
+    if let Some(FileMeta {
         content_disposition,
         content_type,
         file,
-    })) = services().media.get(mxc.clone()).await
+    }) = services().media.get(mxc.clone()).await?
     {
-        Ok(get_content::v1::Response {
+        Ok(get_content::v3::Response {
             file,
             content_type,
-            content_disposition: Some(content_disposition),
+            content_disposition,
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
         })
-    } else if server_name != services().globals.server_name() && allow_remote {
+    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
         let remote_content_response =
-            get_remote_content(&mxc, server_name, media_id.clone()).await?;
+            get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
 
-        Ok(get_content::v1::Response {
-            content_disposition: remote_content_response.content_disposition,
-            content_type: remote_content_response.content_type,
-            file: remote_content_response.file,
-        })
+        Ok(remote_content_response)
     } else {
         Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
     }
@@ -219,74 +121,31 @@ async fn get_content(
 ///
 /// - Only allows federation if `allow_remote` is true
 pub async fn get_content_as_filename_route(
-    body: Ruma<media::get_content_as_filename::v3::Request>,
-) -> Result<media::get_content_as_filename::v3::Response> {
-    let get_content_as_filename::v1::Response {
-        file,
+    body: Ruma<get_content_as_filename::v3::Request>,
+) -> Result<get_content_as_filename::v3::Response> {
+    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+
+    if let Some(FileMeta {
+        content_disposition: _,
         content_type,
-        content_disposition,
-    } = get_content_as_filename(
-        &body.server_name,
-        body.media_id.clone(),
-        body.filename.clone(),
-        body.allow_remote,
-    )
-    .await?;
-
-    Ok(media::get_content_as_filename::v3::Response {
         file,
-        content_type,
-        content_disposition,
-        cross_origin_resource_policy: Some("cross-origin".to_owned()),
-    })
-}
-
-/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
-///
-/// Load media from our server or over federation, permitting desired filename.
-pub async fn get_content_as_filename_auth_route(
-    body: Ruma<get_content_as_filename::v1::Request>,
-) -> Result<get_content_as_filename::v1::Response, Error> {
-    get_content_as_filename(
-        &body.server_name,
-        body.media_id.clone(),
-        body.filename.clone(),
-        true,
-    )
-    .await
-}
-
-async fn get_content_as_filename(
-    server_name: &ServerName,
-    media_id: String,
-    filename: String,
-    allow_remote: bool,
-) -> Result<get_content_as_filename::v1::Response, Error> {
-    let mxc = format!("mxc://{}/{}", server_name, media_id);
-
-    if let Ok(Some(FileMeta {
-        file, content_type, ..
-    })) = services().media.get(mxc.clone()).await
+    }) = services().media.get(mxc.clone()).await?
     {
-        Ok(get_content_as_filename::v1::Response {
+        Ok(get_content_as_filename::v3::Response {
             file,
             content_type,
-            content_disposition: Some(
-                ContentDisposition::new(ContentDispositionType::Inline)
-                    .with_filename(Some(filename.clone())),
-            ),
+            content_disposition: Some(format!("inline; filename={}", body.filename)),
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
         })
-    } else if server_name != services().globals.server_name() && allow_remote {
+    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
         let remote_content_response =
-            get_remote_content(&mxc, server_name, media_id.clone()).await?;
+            get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
 
-        Ok(get_content_as_filename::v1::Response {
-            content_disposition: Some(
-                ContentDisposition::new(ContentDispositionType::Inline)
-                    .with_filename(Some(filename.clone())),
-            ),
+        Ok(get_content_as_filename::v3::Response {
+            content_disposition: Some(format!("inline: filename={}", body.filename)),
             content_type: remote_content_response.content_type,
             file: remote_content_response.file,
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
         })
     } else {
         Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@@ -299,169 +158,60 @@ async fn get_content_as_filename(
 ///
 /// - Only allows federation if `allow_remote` is true
 pub async fn get_content_thumbnail_route(
-    body: Ruma<media::get_content_thumbnail::v3::Request>,
-) -> Result<media::get_content_thumbnail::v3::Response> {
-    let get_content_thumbnail::v1::Response { file, content_type } = get_content_thumbnail(
-        &body.server_name,
-        body.media_id.clone(),
-        body.height,
-        body.width,
-        body.method.clone(),
-        body.animated,
-        body.allow_remote,
-    )
-    .await?;
-
-    Ok(media::get_content_thumbnail::v3::Response {
-        file,
-        content_type,
-        cross_origin_resource_policy: Some("cross-origin".to_owned()),
-    })
-}
-
-/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
-///
-/// Load media thumbnail from our server or over federation.
-pub async fn get_content_thumbnail_auth_route(
-    body: Ruma<get_content_thumbnail::v1::Request>,
-) -> Result<get_content_thumbnail::v1::Response> {
-    get_content_thumbnail(
-        &body.server_name,
-        body.media_id.clone(),
-        body.height,
-        body.width,
-        body.method.clone(),
-        body.animated,
-        true,
-    )
-    .await
-}
-
-async fn get_content_thumbnail(
-    server_name: &ServerName,
-    media_id: String,
-    height: UInt,
-    width: UInt,
-    method: Option<Method>,
-    animated: Option<bool>,
-    allow_remote: bool,
-) -> Result<get_content_thumbnail::v1::Response, Error> {
-    let mxc = format!("mxc://{}/{}", server_name, media_id);
-
-    if let Ok(Some(FileMeta {
-        file, content_type, ..
-    })) = services()
+    body: Ruma<get_content_thumbnail::v3::Request>,
+) -> Result<get_content_thumbnail::v3::Response> {
+    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+
+    if let Some(FileMeta {
+        content_type, file, ..
+    }) = services()
         .media
         .get_thumbnail(
             mxc.clone(),
-            width
+            body.width
                 .try_into()
                 .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
-            height
+            body.height
                 .try_into()
-                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?,
+                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
         )
-        .await
+        .await?
     {
-        Ok(get_content_thumbnail::v1::Response { file, content_type })
-    } else if server_name != services().globals.server_name() && allow_remote {
-        let thumbnail_response = match services()
+        Ok(get_content_thumbnail::v3::Response {
+            file,
+            content_type,
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||||
|
})
|
||||||
|
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||||
|
let get_thumbnail_response = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
server_name,
|
&body.server_name,
|
||||||
federation_media::get_content_thumbnail::v1::Request {
|
get_content_thumbnail::v3::Request {
|
||||||
height,
|
allow_remote: false,
|
||||||
width,
|
height: body.height,
|
||||||
method: method.clone(),
|
width: body.width,
|
||||||
media_id: media_id.clone(),
|
method: body.method.clone(),
|
||||||
timeout_ms: Duration::from_secs(20),
|
server_name: body.server_name.clone(),
|
||||||
animated,
|
media_id: body.media_id.clone(),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await
|
.await?;
|
||||||
{
|
|
||||||
Ok(federation_media::get_content_thumbnail::v1::Response {
|
|
||||||
metadata: _,
|
|
||||||
content: FileOrLocation::File(content),
|
|
||||||
}) => get_content_thumbnail::v1::Response {
|
|
||||||
file: content.file,
|
|
||||||
content_type: content.content_type,
|
|
||||||
},
|
|
||||||
|
|
||||||
Ok(federation_media::get_content_thumbnail::v1::Response {
|
|
||||||
metadata: _,
|
|
||||||
content: FileOrLocation::Location(url),
|
|
||||||
}) => {
|
|
||||||
let get_content::v1::Response {
|
|
||||||
file, content_type, ..
|
|
||||||
} = get_location_content(url).await?;
|
|
||||||
|
|
||||||
get_content_thumbnail::v1::Response { file, content_type }
|
|
||||||
}
|
|
||||||
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
|
|
||||||
let media::get_content_thumbnail::v3::Response {
|
|
||||||
file, content_type, ..
|
|
||||||
} = services()
|
|
||||||
.sending
|
|
||||||
.send_federation_request(
|
|
||||||
server_name,
|
|
||||||
media::get_content_thumbnail::v3::Request {
|
|
||||||
height,
|
|
||||||
width,
|
|
||||||
method: method.clone(),
|
|
||||||
server_name: server_name.to_owned(),
|
|
||||||
media_id: media_id.clone(),
|
|
||||||
timeout_ms: Duration::from_secs(20),
|
|
||||||
allow_redirect: false,
|
|
||||||
animated,
|
|
||||||
allow_remote: false,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
get_content_thumbnail::v1::Response { file, content_type }
|
|
||||||
}
|
|
||||||
Err(e) => return Err(e),
|
|
||||||
};
|
|
||||||
|
|
||||||
services()
|
services()
|
||||||
.media
|
.media
|
||||||
.upload_thumbnail(
|
.upload_thumbnail(
|
||||||
mxc,
|
mxc,
|
||||||
thumbnail_response.content_type.as_deref(),
|
None,
|
||||||
width.try_into().expect("all UInts are valid u32s"),
|
get_thumbnail_response.content_type.as_deref(),
|
||||||
height.try_into().expect("all UInts are valid u32s"),
|
body.width.try_into().expect("all UInts are valid u32s"),
|
||||||
&thumbnail_response.file,
|
body.height.try_into().expect("all UInts are valid u32s"),
|
||||||
|
&get_thumbnail_response.file,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(thumbnail_response)
|
Ok(get_thumbnail_response)
|
||||||
} else {
|
} else {
|
||||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_location_content(url: String) -> Result<get_content::v1::Response, Error> {
|
|
||||||
let client = services().globals.default_client();
|
|
||||||
let response = client.get(url).send().await?;
|
|
||||||
let headers = response.headers();
|
|
||||||
|
|
||||||
let content_type = headers
|
|
||||||
.get(CONTENT_TYPE)
|
|
||||||
.and_then(|header| header.to_str().ok())
|
|
||||||
.map(ToOwned::to_owned);
|
|
||||||
|
|
||||||
let content_disposition = headers
|
|
||||||
.get(CONTENT_DISPOSITION)
|
|
||||||
.map(|header| header.as_bytes())
|
|
||||||
.map(TryFrom::try_from)
|
|
||||||
.and_then(Result::ok);
|
|
||||||
|
|
||||||
let file = response.bytes().await?.to_vec();
|
|
||||||
|
|
||||||
Ok(get_content::v1::Response {
|
|
||||||
file,
|
|
||||||
content_type,
|
|
||||||
content_disposition,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
File diff suppressed because it is too large
@@ -1,13 +1,10 @@
-use crate::{
-    service::{pdu::PduBuilder, rooms::timeline::PduCount},
-    services, utils, Error, Result, Ruma,
-};
+use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma};
 use ruma::{
     api::client::{
         error::ErrorKind,
         message::{get_message_events, send_message_event},
     },
-    events::{StateEventType, TimelineEventType},
+    events::{RoomEventType, StateEventType},
 };
 use std::{
     collections::{BTreeMap, HashSet},
@@ -32,18 +29,18 @@ pub async fn send_message_event_route(
             .globals
             .roomid_mutex_state
             .write()
-            .await
+            .unwrap()
             .entry(body.room_id.clone())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;
 
     // Forbid m.room.encrypted if encryption is disabled
-    if TimelineEventType::RoomEncrypted == body.event_type.to_string().into()
+    if RoomEventType::RoomEncrypted == body.event_type.to_string().into()
         && !services().globals.allow_encryption()
     {
         return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
             "Encryption has been disabled",
         ));
     }
@@ -73,28 +70,19 @@ pub async fn send_message_event_route(
     let mut unsigned = BTreeMap::new();
     unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
 
-    let event_id = services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: body.event_type.to_string().into(),
-                content: serde_json::from_str(body.body.body.json().get())
-                    .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
-                unsigned: Some(unsigned),
-                state_key: None,
-                redacts: None,
-                timestamp: if body.appservice_info.is_some() {
-                    body.timestamp
-                } else {
-                    None
-                },
-            },
-            sender_user,
-            &body.room_id,
-            &state_lock,
-        )
-        .await?;
+    let event_id = services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: body.event_type.to_string().into(),
+            content: serde_json::from_str(body.body.body.json().get())
+                .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
+            unsigned: Some(unsigned),
+            state_key: None,
+            redacts: None,
+        },
+        sender_user,
+        &body.room_id,
+        &state_lock,
+    )?;
 
     services().transaction_ids.add_txnid(
         sender_user,
@@ -122,26 +110,39 @@ pub async fn get_message_events_route(
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
 
+    if !services()
+        .rooms
+        .state_cache
+        .is_joined(sender_user, &body.room_id)?
+    {
+        return Err(Error::BadRequest(
+            ErrorKind::Forbidden,
+            "You don't have permission to view this room.",
+        ));
+    }
+
     let from = match body.from.clone() {
-        Some(from) => PduCount::try_from_string(&from)?,
+        Some(from) => from
+            .parse()
+            .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?,
 
         None => match body.dir {
-            ruma::api::Direction::Forward => PduCount::min(),
-            ruma::api::Direction::Backward => PduCount::max(),
+            ruma::api::client::Direction::Forward => 0,
+            ruma::api::client::Direction::Backward => u64::MAX,
         },
     };
 
-    let to = body
-        .to
-        .as_ref()
-        .and_then(|t| PduCount::try_from_string(t).ok());
+    let to = body.to.as_ref().map(|t| t.parse());
 
-    services()
-        .rooms
-        .lazy_loading
-        .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
-        .await?;
+    services().rooms.lazy_loading.lazy_load_confirm_delivery(
+        sender_user,
+        sender_device,
+        &body.room_id,
+        from,
+    )?;
 
-    let limit = u64::from(body.limit).min(100) as usize;
+    // Use limit or else 10
+    let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);
 
     let next_token;
 
@@ -150,21 +151,22 @@ pub async fn get_message_events_route(
     let mut lazy_loaded = HashSet::new();
 
     match body.dir {
-        ruma::api::Direction::Forward => {
+        ruma::api::client::Direction::Forward => {
             let events_after: Vec<_> = services()
                 .rooms
                 .timeline
                 .pdus_after(sender_user, &body.room_id, from)?
                 .take(limit)
                 .filter_map(|r| r.ok()) // Filter out buggy events
-                .filter(|(_, pdu)| {
+                .filter_map(|(pdu_id, pdu)| {
                     services()
                         .rooms
-                        .state_accessor
-                        .user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
-                        .unwrap_or(false)
+                        .timeline
+                        .pdu_count(&pdu_id)
+                        .map(|pdu_count| (pdu_count, pdu))
+                        .ok()
                 })
-                .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
+                .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
                 .collect();
 
             for (_, event) in &events_after {
@@ -190,30 +192,26 @@ pub async fn get_message_events_route(
                 .map(|(_, pdu)| pdu.to_room_event())
                 .collect();
 
-            resp.start = from.stringify();
-            resp.end = next_token.map(|count| count.stringify());
+            resp.start = from.to_string();
+            resp.end = next_token.map(|count| count.to_string());
             resp.chunk = events_after;
         }
-        ruma::api::Direction::Backward => {
-            services()
-                .rooms
-                .timeline
-                .backfill_if_required(&body.room_id, from)
-                .await?;
+        ruma::api::client::Direction::Backward => {
             let events_before: Vec<_> = services()
                 .rooms
                 .timeline
                 .pdus_until(sender_user, &body.room_id, from)?
                 .take(limit)
                 .filter_map(|r| r.ok()) // Filter out buggy events
-                .filter(|(_, pdu)| {
+                .filter_map(|(pdu_id, pdu)| {
                     services()
                         .rooms
-                        .state_accessor
-                        .user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
-                        .unwrap_or(false)
+                        .timeline
+                        .pdu_count(&pdu_id)
+                        .map(|pdu_count| (pdu_count, pdu))
+                        .ok()
                 })
-                .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
+                .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
                 .collect();
 
             for (_, event) in &events_before {
@@ -239,8 +237,8 @@ pub async fn get_message_events_route(
                 .map(|(_, pdu)| pdu.to_room_event())
                 .collect();
 
-            resp.start = from.stringify();
-            resp.end = next_token.map(|count| count.stringify());
+            resp.start = from.to_string();
+            resp.end = next_token.map(|count| count.to_string());
             resp.chunk = events_before;
         }
     }
@@ -11,29 +11,24 @@ mod keys;
 mod media;
 mod membership;
 mod message;
-mod openid;
 mod presence;
 mod profile;
 mod push;
 mod read_marker;
 mod redact;
-mod relations;
 mod report;
 mod room;
 mod search;
 mod session;
-mod space;
 mod state;
 mod sync;
 mod tag;
 mod thirdparty;
-mod threads;
 mod to_device;
 mod typing;
 mod unversioned;
 mod user_directory;
 mod voip;
-mod well_known;
 
 pub use account::*;
 pub use alias::*;
@@ -48,29 +43,24 @@ pub use keys::*;
 pub use media::*;
 pub use membership::*;
 pub use message::*;
-pub use openid::*;
 pub use presence::*;
 pub use profile::*;
 pub use push::*;
 pub use read_marker::*;
 pub use redact::*;
-pub use relations::*;
 pub use report::*;
 pub use room::*;
 pub use search::*;
 pub use session::*;
-pub use space::*;
 pub use state::*;
 pub use sync::*;
 pub use tag::*;
 pub use thirdparty::*;
-pub use threads::*;
 pub use to_device::*;
 pub use typing::*;
 pub use unversioned::*;
 pub use user_directory::*;
 pub use voip::*;
-pub use well_known::*;
 
 pub const DEVICE_ID_LENGTH: usize = 10;
 pub const TOKEN_LENGTH: usize = 32;
@@ -1,23 +0,0 @@
-use std::time::Duration;
-
-use ruma::{api::client::account, authentication::TokenType};
-
-use crate::{services, Result, Ruma};
-
-/// # `POST /_matrix/client/r0/user/{userId}/openid/request_token`
-///
-/// Request an OpenID token to verify identity with third-party services.
-///
-/// - The token generated is only valid for the OpenID API.
-pub async fn create_openid_token_route(
-    body: Ruma<account::request_openid_token::v3::Request>,
-) -> Result<account::request_openid_token::v3::Response> {
-    let (access_token, expires_in) = services().users.create_openid_token(&body.user_id)?;
-
-    Ok(account::request_openid_token::v3::Response {
-        access_token,
-        token_type: TokenType::Bearer,
-        matrix_server_name: services().globals.server_name().to_owned(),
-        expires_in: Duration::from_secs(expires_in),
-    })
-}
@@ -1,8 +1,5 @@
-use crate::{services, utils, Error, Result, Ruma};
-use ruma::api::client::{
-    error::ErrorKind,
-    presence::{get_presence, set_presence},
-};
+use crate::{services, utils, Result, Ruma};
+use ruma::api::client::presence::{get_presence, set_presence};
 use std::time::Duration;
 
 /// # `PUT /_matrix/client/r0/presence/{userId}/status`
@@ -82,9 +79,6 @@ pub async fn get_presence_route(
             presence: presence.content.presence,
         })
     } else {
-        Err(Error::BadRequest(
-            ErrorKind::NotFound,
-            "Presence state for this user was not found",
-        ))
+        todo!();
     }
 }
@@ -9,7 +9,7 @@ use ruma::{
         },
         federation::{self, query::get_profile_information::v1::ProfileField},
     },
-    events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType},
+    events::{room::member::RoomMemberEventContent, RoomEventType, StateEventType},
 };
 use serde_json::value::to_raw_value;
 use std::sync::Arc;
@@ -37,10 +37,9 @@ pub async fn set_displayname_route(
         .map(|room_id| {
             Ok::<_, Error>((
                 PduBuilder {
-                    event_type: TimelineEventType::RoomMember,
+                    event_type: RoomEventType::RoomMember,
                     content: to_raw_value(&RoomMemberEventContent {
                         displayname: body.displayname.clone(),
-                        join_authorized_via_users_server: None,
                         ..serde_json::from_str(
                             services()
                                 .rooms
@@ -65,7 +64,6 @@ pub async fn set_displayname_route(
                     unsigned: None,
                     state_key: Some(sender_user.to_string()),
                     redacts: None,
-                    timestamp: None,
                 },
                 room_id,
             ))
@@ -79,17 +77,18 @@ pub async fn set_displayname_route(
                 .globals
                 .roomid_mutex_state
                 .write()
-                .await
+                .unwrap()
                 .entry(room_id.clone())
                 .or_default(),
        );
        let state_lock = mutex_state.lock().await;
 
-        let _ = services()
-            .rooms
-            .timeline
-            .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
-            .await;
+        let _ = services().rooms.timeline.build_and_append_pdu(
+            pdu_builder,
+            sender_user,
+            &room_id,
+            &state_lock,
+        );
 
     // Presence update
     services().rooms.edus.presence.update_presence(
@@ -173,10 +172,9 @@ pub async fn set_avatar_url_route(
         .map(|room_id| {
             Ok::<_, Error>((
                 PduBuilder {
-                    event_type: TimelineEventType::RoomMember,
+                    event_type: RoomEventType::RoomMember,
                     content: to_raw_value(&RoomMemberEventContent {
                         avatar_url: body.avatar_url.clone(),
-                        join_authorized_via_users_server: None,
                         ..serde_json::from_str(
                             services()
                                 .rooms
@@ -201,7 +199,6 @@ pub async fn set_avatar_url_route(
                    unsigned: None,
                    state_key: Some(sender_user.to_string()),
                    redacts: None,
-                    timestamp: None,
                 },
                 room_id,
             ))
@@ -215,17 +212,18 @@ pub async fn set_avatar_url_route(
                .globals
                .roomid_mutex_state
                .write()
-                .await
+                .unwrap()
                .entry(room_id.clone())
                .or_default(),
        );
        let state_lock = mutex_state.lock().await;
 
-        let _ = services()
-            .rooms
-            .timeline
-            .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
-            .await;
+        let _ = services().rooms.timeline.build_and_append_pdu(
+            pdu_builder,
+            sender_user,
+            &room_id,
+            &state_lock,
+        );
 
     // Presence update
     services().rooms.edus.presence.update_presence(
@@ -5,11 +5,11 @@ use ruma::{
         push::{
             delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled,
             get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions,
-            set_pushrule_enabled, RuleScope,
+            set_pushrule_enabled, RuleKind, RuleScope,
         },
     },
     events::{push_rules::PushRulesEvent, GlobalAccountDataEventType},
-    push::{InsertPushRuleError, RemovePushRuleError},
+    push::{ConditionalPushRuleInit, NewPushRule, PatternedPushRuleInit, SimplePushRuleInit},
 };
 
 /// # `GET /_matrix/client/r0/pushrules`
@@ -65,10 +65,30 @@ pub async fn get_pushrule_route(
         .map_err(|_| Error::bad_database("Invalid account data event in db."))?
         .content;
 
-    let rule = account_data
-        .global
-        .get(body.kind.clone(), &body.rule_id)
-        .map(Into::into);
+    let global = account_data.global;
+    let rule = match body.kind {
+        RuleKind::Override => global
+            .override_
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.clone().into()),
+        RuleKind::Underride => global
+            .underride
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.clone().into()),
+        RuleKind::Sender => global
+            .sender
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.clone().into()),
+        RuleKind::Room => global
+            .room
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.clone().into()),
+        RuleKind::Content => global
+            .content
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.clone().into()),
+        _ => None,
+    };
 
     if let Some(rule) = rule {
         Ok(get_pushrule::v3::Response { rule })
@@ -111,36 +131,66 @@ pub async fn set_pushrule_route(
     let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
         .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
 
-    if let Err(error) = account_data.content.global.insert(
-        body.rule.clone(),
-        body.after.as_deref(),
-        body.before.as_deref(),
-    ) {
-        let err = match error {
-            InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest(
-                ErrorKind::InvalidParam,
-                "Rule IDs starting with a dot are reserved for server-default rules.",
-            ),
-            InsertPushRuleError::InvalidRuleId => Error::BadRequest(
-                ErrorKind::InvalidParam,
-                "Rule ID containing invalid characters.",
-            ),
-            InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest(
-                ErrorKind::InvalidParam,
-                "Can't place a push rule relatively to a server-default rule.",
-            ),
-            InsertPushRuleError::UnknownRuleId => Error::BadRequest(
-                ErrorKind::NotFound,
-                "The before or after rule could not be found.",
-            ),
-            InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest(
-                ErrorKind::InvalidParam,
-                "The before rule has a higher priority than the after rule.",
-            ),
-            _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
-        };
-
-        return Err(err);
+    let global = &mut account_data.content.global;
+    match body.rule {
+        NewPushRule::Override(rule) => {
+            global.override_.replace(
+                ConditionalPushRuleInit {
+                    actions: rule.actions,
+                    default: false,
+                    enabled: true,
+                    rule_id: rule.rule_id,
+                    conditions: rule.conditions,
+                }
+                .into(),
+            );
+        }
+        NewPushRule::Underride(rule) => {
+            global.underride.replace(
+                ConditionalPushRuleInit {
+                    actions: rule.actions,
+                    default: false,
+                    enabled: true,
+                    rule_id: rule.rule_id,
+                    conditions: rule.conditions,
+                }
+                .into(),
+            );
+        }
+        NewPushRule::Sender(rule) => {
+            global.sender.replace(
+                SimplePushRuleInit {
+                    actions: rule.actions,
+                    default: false,
+                    enabled: true,
+                    rule_id: rule.rule_id,
+                }
+                .into(),
+            );
+        }
+        NewPushRule::Room(rule) => {
+            global.room.replace(
+                SimplePushRuleInit {
+                    actions: rule.actions,
+                    default: false,
+                    enabled: true,
+                    rule_id: rule.rule_id,
+                }
+                .into(),
+            );
+        }
+        NewPushRule::Content(rule) => {
+            global.content.replace(
+                PatternedPushRuleInit {
+                    actions: rule.actions,
+                    default: false,
+                    enabled: true,
+                    rule_id: rule.rule_id,
+                    pattern: rule.pattern,
+                }
+                .into(),
+            );
+        }
     }
 
     services().account_data.update(
@@ -185,15 +235,33 @@ pub async fn get_pushrule_actions_route(
         .content;
 
     let global = account_data.global;
-    let actions = global
-        .get(body.kind.clone(), &body.rule_id)
-        .map(|rule| rule.actions().to_owned())
-        .ok_or(Error::BadRequest(
-            ErrorKind::NotFound,
-            "Push rule not found.",
-        ))?;
+    let actions = match body.kind {
+        RuleKind::Override => global
+            .override_
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.actions.clone()),
+        RuleKind::Underride => global
+            .underride
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.actions.clone()),
+        RuleKind::Sender => global
+            .sender
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.actions.clone()),
+        RuleKind::Room => global
+            .room
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.actions.clone()),
+        RuleKind::Content => global
+            .content
+            .get(body.rule_id.as_str())
+            .map(|rule| rule.actions.clone()),
+        _ => None,
+    };
 
-    Ok(get_pushrule_actions::v3::Response { actions })
+    Ok(get_pushrule_actions::v3::Response {
+        actions: actions.unwrap_or_default(),
+    })
 }
 
 /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
@@ -226,17 +294,40 @@ pub async fn set_pushrule_actions_route(
     let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
         .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
 
-    if account_data
-        .content
-        .global
-        .set_actions(body.kind.clone(), &body.rule_id, body.actions.clone())
-        .is_err()
-    {
-        return Err(Error::BadRequest(
-            ErrorKind::NotFound,
-            "Push rule not found.",
-        ));
-    }
+    let global = &mut account_data.content.global;
+    match body.kind {
+        RuleKind::Override => {
+            if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
+                rule.actions = body.actions.clone();
+                global.override_.replace(rule);
+            }
+        }
+        RuleKind::Underride => {
+            if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() {
+                rule.actions = body.actions.clone();
+                global.underride.replace(rule);
+            }
+        }
+        RuleKind::Sender => {
+            if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() {
+                rule.actions = body.actions.clone();
+                global.sender.replace(rule);
+            }
+        }
+        RuleKind::Room => {
+            if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() {
+                rule.actions = body.actions.clone();
+                global.room.replace(rule);
+            }
+        }
+        RuleKind::Content => {
+            if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() {
+                rule.actions = body.actions.clone();
+                global.content.replace(rule);
+            }
+        }
+        _ => {}
+    };
 
     services().account_data.update(
         None,
@@ -279,13 +370,34 @@ pub async fn get_pushrule_enabled_route(
         .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
 
     let global = account_data.content.global;
-    let enabled = global
-        .get(body.kind.clone(), &body.rule_id)
-        .map(|r| r.enabled())
-        .ok_or(Error::BadRequest(
-            ErrorKind::NotFound,
-            "Push rule not found.",
-        ))?;
+    let enabled = match body.kind {
+        RuleKind::Override => global
+            .override_
+            .iter()
+            .find(|rule| rule.rule_id == body.rule_id)
+            .map_or(false, |rule| rule.enabled),
+        RuleKind::Underride => global
+            .underride
+            .iter()
+            .find(|rule| rule.rule_id == body.rule_id)
+            .map_or(false, |rule| rule.enabled),
+        RuleKind::Sender => global
+            .sender
+            .iter()
+            .find(|rule| rule.rule_id == body.rule_id)
+            .map_or(false, |rule| rule.enabled),
+        RuleKind::Room => global
+            .room
+            .iter()
+            .find(|rule| rule.rule_id == body.rule_id)
+            .map_or(false, |rule| rule.enabled),
+        RuleKind::Content => global
+            .content
+            .iter()
+            .find(|rule| rule.rule_id == body.rule_id)
+            .map_or(false, |rule| rule.enabled),
+        _ => false,
+    };
 
     Ok(get_pushrule_enabled::v3::Response { enabled })
 }
@@ -320,16 +432,44 @@ pub async fn set_pushrule_enabled_route(
     let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
         .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
 
-    if account_data
-        .content
-        .global
-        .set_enabled(body.kind.clone(), &body.rule_id, body.enabled)
-        .is_err()
-    {
-        return Err(Error::BadRequest(
-            ErrorKind::NotFound,
-            "Push rule not found.",
-        ));
+    let global = &mut account_data.content.global;
+    match body.kind {
+        RuleKind::Override => {
+            if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
+                global.override_.remove(&rule);
+                rule.enabled = body.enabled;
+                global.override_.insert(rule);
+            }
+        }
+        RuleKind::Underride => {
+            if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() {
+                global.underride.remove(&rule);
+                rule.enabled = body.enabled;
+                global.underride.insert(rule);
+            }
+        }
+        RuleKind::Sender => {
+            if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() {
+                global.sender.remove(&rule);
+                rule.enabled = body.enabled;
+                global.sender.insert(rule);
+            }
+        }
+        RuleKind::Room => {
+            if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() {
+                global.room.remove(&rule);
+                rule.enabled = body.enabled;
+                global.room.insert(rule);
+            }
+        }
+        RuleKind::Content => {
+            if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() {
+                global.content.remove(&rule);
+                rule.enabled = body.enabled;
+                global.content.insert(rule);
+            }
+        }
+        _ => {}
     }
 
     services().account_data.update(
@@ -372,23 +512,34 @@ pub async fn delete_pushrule_route(
     let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
         .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
 
-    if let Err(error) = account_data
-        .content
-        .global
-        .remove(body.kind.clone(), &body.rule_id)
-    {
-        let err = match error {
-            RemovePushRuleError::ServerDefault => Error::BadRequest(
-                ErrorKind::InvalidParam,
-                "Cannot delete a server-default pushrule.",
-            ),
-            RemovePushRuleError::NotFound => {
-                Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")
+    let global = &mut account_data.content.global;
+    match body.kind {
+        RuleKind::Override => {
+            if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() {
+                global.override_.remove(&rule);
             }
-            _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
-        };
-
-        return Err(err);
+        }
+        RuleKind::Underride => {
+            if let Some(rule) = global.underride.get(body.rule_id.as_str()).cloned() {
+                global.underride.remove(&rule);
+            }
+        }
+        RuleKind::Sender => {
+            if let Some(rule) = global.sender.get(body.rule_id.as_str()).cloned() {
+                global.sender.remove(&rule);
+            }
+        }
+        RuleKind::Room => {
+            if let Some(rule) = global.room.get(body.rule_id.as_str()).cloned() {
+                global.room.remove(&rule);
+            }
+        }
+        RuleKind::Content => {
+            if let Some(rule) = global.content.get(body.rule_id.as_str()).cloned() {
+                global.content.remove(&rule);
+            }
+        }
+        _ => {}
     }
 
     services().account_data.update(
@@ -1,4 +1,4 @@
-use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma};
+use crate::{services, Error, Result, Ruma};
 use ruma::{
     api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
     events::{
@@ -34,61 +34,62 @@ pub async fn set_read_marker_route(
         )?;
     }
 
-    if body.private_read_receipt.is_some() || body.read_receipt.is_some() {
-        services()
-            .rooms
-            .user
-            .reset_notification_counts(sender_user, &body.room_id)?;
-    }
-
     if let Some(event) = &body.private_read_receipt {
-        let count = services()
+        let _pdu = services()
             .rooms
             .timeline
-            .get_pdu_count(event)?
+            .get_pdu(event)?
             .ok_or(Error::BadRequest(
                 ErrorKind::InvalidParam,
                 "Event does not exist.",
             ))?;
-        let count = match count {
-            PduCount::Backfilled(_) => {
-                return Err(Error::BadRequest(
-                    ErrorKind::InvalidParam,
-                    "Read receipt is in backfilled timeline",
-                ))
-            }
-            PduCount::Normal(c) => c,
-        };
-        services()
-            .rooms
-            .edus
-            .read_receipt
-            .private_read_set(&body.room_id, sender_user, count)?;
+        services().rooms.edus.read_receipt.private_read_set(
+            &body.room_id,
+            sender_user,
+            services().rooms.short.get_or_create_shorteventid(event)?,
+        )?;
     }
 
     if let Some(event) = &body.read_receipt {
-        let mut user_receipts = BTreeMap::new();
-        user_receipts.insert(
-            sender_user.clone(),
-            ruma::events::receipt::Receipt {
-                ts: Some(MilliSecondsSinceUnixEpoch::now()),
-                thread: ReceiptThread::Unthreaded,
-            },
-        );
-
-        let mut receipts = BTreeMap::new();
-        receipts.insert(ReceiptType::Read, user_receipts);
-
-        let mut receipt_content = BTreeMap::new();
-        receipt_content.insert(event.to_owned(), receipts);
-
-        services().rooms.edus.read_receipt.readreceipt_update(
-            sender_user,
-            &body.room_id,
-            ruma::events::receipt::ReceiptEvent {
-                content: ruma::events::receipt::ReceiptEventContent(receipt_content),
-                room_id: body.room_id.clone(),
-            },
+        let _pdu = services()
+            .rooms
+            .timeline
+            .get_pdu(event)?
+            .ok_or(Error::BadRequest(
+                ErrorKind::InvalidParam,
+                "Event does not exist.",
+            ))?;
+
+        if services().globals.allow_public_read_receipts() {
+            let mut user_receipts = BTreeMap::new();
+            user_receipts.insert(
+                sender_user.clone(),
+                ruma::events::receipt::Receipt {
+                    ts: Some(MilliSecondsSinceUnixEpoch::now()),
+                    thread: ReceiptThread::Unthreaded,
+                },
+            );
+
+            let mut receipts = BTreeMap::new();
+            receipts.insert(ReceiptType::Read, user_receipts);
+
+            let mut receipt_content = BTreeMap::new();
+            receipt_content.insert(event.to_owned(), receipts);
+
+            services().rooms.edus.read_receipt.readreceipt_update(
+                sender_user,
+                &body.room_id,
+                ruma::events::receipt::ReceiptEvent {
+                    content: ruma::events::receipt::ReceiptEventContent(receipt_content),
+                    room_id: body.room_id.clone(),
+                },
+            )?;
+        };
+        services().rooms.edus.read_receipt.private_read_set(
+            &body.room_id,
+            sender_user,
+            services().rooms.short.get_or_create_shorteventid(event)?,
        )?;
    }
 
@@ -103,16 +104,6 @@ pub async fn create_receipt_route(
 ) -> Result<create_receipt::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    if matches!(
-        &body.receipt_type,
-        create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate
-    ) {
-        services()
-            .rooms
-            .user
-            .reset_notification_counts(sender_user, &body.room_id)?;
-    }
-
     match body.receipt_type {
         create_receipt::v3::ReceiptType::FullyRead => {
             let fully_read_event = ruma::events::fully_read::FullyReadEvent {
@@ -128,51 +119,67 @@ pub async fn create_receipt_route(
             )?;
         }
         create_receipt::v3::ReceiptType::Read => {
-            let mut user_receipts = BTreeMap::new();
-            user_receipts.insert(
-                sender_user.clone(),
-                ruma::events::receipt::Receipt {
-                    ts: Some(MilliSecondsSinceUnixEpoch::now()),
-                    thread: ReceiptThread::Unthreaded,
-                },
-            );
-            let mut receipts = BTreeMap::new();
-            receipts.insert(ReceiptType::Read, user_receipts);
-
-            let mut receipt_content = BTreeMap::new();
-            receipt_content.insert(body.event_id.to_owned(), receipts);
-
-            services().rooms.edus.read_receipt.readreceipt_update(
-                sender_user,
-                &body.room_id,
-                ruma::events::receipt::ReceiptEvent {
-                    content: ruma::events::receipt::ReceiptEventContent(receipt_content),
-                    room_id: body.room_id.clone(),
-                },
-            )?;
-        }
-        create_receipt::v3::ReceiptType::ReadPrivate => {
-            let count = services()
-                .rooms
-                .timeline
-                .get_pdu_count(&body.event_id)?
-                .ok_or(Error::BadRequest(
-                    ErrorKind::InvalidParam,
-                    "Event does not exist.",
-                ))?;
-            let count = match count {
-                PduCount::Backfilled(_) => {
-                    return Err(Error::BadRequest(
-                        ErrorKind::InvalidParam,
-                        "Read receipt is in backfilled timeline",
-                    ))
-                }
-                PduCount::Normal(c) => c,
-            };
-            services().rooms.edus.read_receipt.private_read_set(
-                &body.room_id,
-                sender_user,
-                count,
+            let _pdu =
+                services()
+                    .rooms
+                    .timeline
+                    .get_pdu(&body.event_id)?
+                    .ok_or(Error::BadRequest(
+                        ErrorKind::InvalidParam,
+                        "Event does not exist.",
+                    ))?;
+
+            if services().globals.allow_public_read_receipts() {
+                let mut user_receipts = BTreeMap::new();
+                user_receipts.insert(
+                    sender_user.clone(),
+                    ruma::events::receipt::Receipt {
+                        ts: Some(MilliSecondsSinceUnixEpoch::now()),
+                        thread: ReceiptThread::Unthreaded,
+                    },
+                );
+                let mut receipts = BTreeMap::new();
+                receipts.insert(ReceiptType::Read, user_receipts);
+
+                let mut receipt_content = BTreeMap::new();
+                receipt_content.insert(body.event_id.to_owned(), receipts);
+
+                services().rooms.edus.read_receipt.readreceipt_update(
+                    sender_user,
+                    &body.room_id,
+                    ruma::events::receipt::ReceiptEvent {
+                        content: ruma::events::receipt::ReceiptEventContent(receipt_content),
+                        room_id: body.room_id.clone(),
+                    },
+                )?;
+            };
+            services().rooms.edus.read_receipt.private_read_set(
+                &body.room_id,
+                sender_user,
+                services()
+                    .rooms
+                    .short
+                    .get_or_create_shorteventid(&body.event_id)?,
+            )?;
+        }
+        create_receipt::v3::ReceiptType::ReadPrivate => {
+            let _pdu =
+                services()
+                    .rooms
+                    .timeline
+                    .get_pdu(&body.event_id)?
+                    .ok_or(Error::BadRequest(
+                        ErrorKind::InvalidParam,
+                        "Event does not exist.",
+                    ))?;
+
+            services().rooms.edus.read_receipt.private_read_set(
+                &body.room_id,
+                sender_user,
+                services()
+                    .rooms
+                    .short
+                    .get_or_create_shorteventid(&body.event_id)?,
             )?;
         }
         _ => return Err(Error::bad_database("Unsupported receipt type")),
@@ -3,7 +3,7 @@ use std::sync::Arc;
 use crate::{service::pdu::PduBuilder, services, Result, Ruma};
 use ruma::{
     api::client::redact::redact_event,
-    events::{room::redaction::RoomRedactionEventContent, TimelineEventType},
+    events::{room::redaction::RoomRedactionEventContent, RoomEventType},
 };
 
 use serde_json::value::to_raw_value;
@@ -24,33 +24,27 @@ pub async fn redact_event_route(
             .globals
             .roomid_mutex_state
             .write()
-            .await
+            .unwrap()
             .entry(body.room_id.clone())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;
 
-    let event_id = services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomRedaction,
-                content: to_raw_value(&RoomRedactionEventContent {
-                    redacts: Some(body.event_id.clone()),
-                    reason: body.reason.clone(),
-                })
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: None,
-                redacts: Some(body.event_id.into()),
-                timestamp: None,
-            },
-            sender_user,
-            &body.room_id,
-            &state_lock,
-        )
-        .await?;
+    let event_id = services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomRedaction,
+            content: to_raw_value(&RoomRedactionEventContent {
+                reason: body.reason.clone(),
+            })
+            .expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: None,
+            redacts: Some(body.event_id.into()),
+        },
+        sender_user,
+        &body.room_id,
+        &state_lock,
+    )?;
 
     drop(state_lock);
@@ -1,91 +0,0 @@
-use ruma::api::client::relations::{
-    get_relating_events, get_relating_events_with_rel_type,
-    get_relating_events_with_rel_type_and_event_type,
-};
-
-use crate::{services, Result, Ruma};
-
-/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
-pub async fn get_relating_events_with_rel_type_and_event_type_route(
-    body: Ruma<get_relating_events_with_rel_type_and_event_type::v1::Request>,
-) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-
-    let res = services()
-        .rooms
-        .pdu_metadata
-        .paginate_relations_with_filter(
-            sender_user,
-            &body.room_id,
-            &body.event_id,
-            Some(body.event_type.clone()),
-            Some(body.rel_type.clone()),
-            body.from.clone(),
-            body.to.clone(),
-            body.limit,
-            body.recurse,
-            &body.dir,
-        )?;
-
-    Ok(
-        get_relating_events_with_rel_type_and_event_type::v1::Response {
-            chunk: res.chunk,
-            next_batch: res.next_batch,
-            prev_batch: res.prev_batch,
-            recursion_depth: res.recursion_depth,
-        },
-    )
-}
-
-/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}`
-pub async fn get_relating_events_with_rel_type_route(
-    body: Ruma<get_relating_events_with_rel_type::v1::Request>,
-) -> Result<get_relating_events_with_rel_type::v1::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-
-    let res = services()
-        .rooms
-        .pdu_metadata
-        .paginate_relations_with_filter(
-            sender_user,
-            &body.room_id,
-            &body.event_id,
-            None,
-            Some(body.rel_type.clone()),
-            body.from.clone(),
-            body.to.clone(),
-            body.limit,
-            body.recurse,
-            &body.dir,
-        )?;
-
-    Ok(get_relating_events_with_rel_type::v1::Response {
-        chunk: res.chunk,
-        next_batch: res.next_batch,
-        prev_batch: res.prev_batch,
-        recursion_depth: res.recursion_depth,
-    })
-}
-
-/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}`
-pub async fn get_relating_events_route(
-    body: Ruma<get_relating_events::v1::Request>,
-) -> Result<get_relating_events::v1::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-
-    services()
-        .rooms
-        .pdu_metadata
-        .paginate_relations_with_filter(
-            sender_user,
-            &body.room_id,
-            &body.event_id,
-            None,
-            None,
-            body.from.clone(),
-            body.to.clone(),
-            body.limit,
-            body.recurse,
-            &body.dir,
-        )
-}
@@ -19,11 +19,11 @@ use ruma::{
            tombstone::RoomTombstoneEventContent,
            topic::RoomTopicEventContent,
        },
-        StateEventType, TimelineEventType,
+        RoomEventType, StateEventType,
    },
    int,
    serde::JsonObject,
-    CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId,
+    CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId,
 };
 use serde_json::{json, value::to_raw_value};
 use std::{cmp::max, collections::BTreeMap, sync::Arc};
@@ -61,18 +61,18 @@ pub async fn create_room_route(
            .globals
            .roomid_mutex_state
            .write()
-            .await
+            .unwrap()
            .entry(room_id.clone())
            .or_default(),
    );
    let state_lock = mutex_state.lock().await;

    if !services().globals.allow_room_creation()
-        && body.appservice_info.is_none()
+        && !body.from_appservice
        && !services().users.is_admin(sender_user)?
    {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
            "Room creation has been disabled.",
        ));
    }
@@ -104,22 +104,6 @@ pub async fn create_room_route(
        }
    })?;

-    if let Some(ref alias) = alias {
-        if let Some(ref info) = body.appservice_info {
-            if !info.aliases.is_match(alias.as_str()) {
-                return Err(Error::BadRequest(
-                    ErrorKind::Exclusive,
-                    "Room alias is not in namespace.",
-                ));
-            }
-        } else if services().appservice.is_exclusive_alias(alias).await {
-            return Err(Error::BadRequest(
-                ErrorKind::Exclusive,
-                "Room alias reserved by appservice.",
-            ));
-        }
-    }
-
    let room_version = match body.room_version.clone() {
        Some(room_version) => {
            if services()
@@ -143,29 +127,12 @@ pub async fn create_room_route(
            let mut content = content
                .deserialize_as::<CanonicalJsonObject>()
                .expect("Invalid creation content");
-            match room_version {
-                RoomVersionId::V1
-                | RoomVersionId::V2
-                | RoomVersionId::V3
-                | RoomVersionId::V4
-                | RoomVersionId::V5
-                | RoomVersionId::V6
-                | RoomVersionId::V7
-                | RoomVersionId::V8
-                | RoomVersionId::V9
-                | RoomVersionId::V10 => {
-                    content.insert(
-                        "creator".into(),
-                        json!(&sender_user).try_into().map_err(|_| {
-                            Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
-                        })?,
-                    );
-                }
-                RoomVersionId::V11 => {} // V11 removed the "creator" key
-                _ => unreachable!("Validity of room version already checked"),
-            }
-
+            content.insert(
+                "creator".into(),
+                json!(&sender_user).try_into().map_err(|_| {
+                    Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
+                })?,
+            );
            content.insert(
                "room_version".into(),
                json!(room_version.as_str()).try_into().map_err(|_| {
@@ -175,22 +142,8 @@ pub async fn create_room_route(
            content
        }
        None => {
-            let content = match room_version {
-                RoomVersionId::V1
-                | RoomVersionId::V2
-                | RoomVersionId::V3
-                | RoomVersionId::V4
-                | RoomVersionId::V5
-                | RoomVersionId::V6
-                | RoomVersionId::V7
-                | RoomVersionId::V8
-                | RoomVersionId::V9
-                | RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()),
-                RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
-                _ => unreachable!("Validity of room version already checked"),
-            };
            let mut content = serde_json::from_str::<CanonicalJsonObject>(
-                to_raw_value(&content)
+                to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
                    .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
                    .get(),
            )
@@ -220,52 +173,42 @@ pub async fn create_room_route(
    }

    // 1. The room create event
-    services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomCreate,
-                content: to_raw_value(&content).expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &room_id,
-            &state_lock,
-        )
-        .await?;
+    services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomCreate,
+            content: to_raw_value(&content).expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some("".to_owned()),
+            redacts: None,
+        },
+        sender_user,
+        &room_id,
+        &state_lock,
+    )?;

    // 2. Let the room creator join
-    services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomMember,
-                content: to_raw_value(&RoomMemberEventContent {
-                    membership: MembershipState::Join,
-                    displayname: services().users.displayname(sender_user)?,
-                    avatar_url: services().users.avatar_url(sender_user)?,
-                    is_direct: Some(body.is_direct),
-                    third_party_invite: None,
-                    blurhash: services().users.blurhash(sender_user)?,
-                    reason: None,
-                    join_authorized_via_users_server: None,
-                })
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some(sender_user.to_string()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &room_id,
-            &state_lock,
-        )
-        .await?;
+    services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomMember,
+            content: to_raw_value(&RoomMemberEventContent {
+                membership: MembershipState::Join,
+                displayname: services().users.displayname(sender_user)?,
+                avatar_url: services().users.avatar_url(sender_user)?,
+                is_direct: Some(body.is_direct),
+                third_party_invite: None,
+                blurhash: services().users.blurhash(sender_user)?,
+                reason: None,
+                join_authorized_via_users_server: None,
+            })
+            .expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some(sender_user.to_string()),
+            redacts: None,
+        },
+        sender_user,
+        &room_id,
+        &state_lock,
+    )?;

    // 3. Power levels
@@ -302,120 +245,95 @@ pub async fn create_room_route(
        }
    }

-    services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomPowerLevels,
-                content: to_raw_value(&power_levels_content)
-                    .expect("to_raw_value always works on serde_json::Value"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &room_id,
-            &state_lock,
-        )
-        .await?;
+    services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomPowerLevels,
+            content: to_raw_value(&power_levels_content)
+                .expect("to_raw_value always works on serde_json::Value"),
+            unsigned: None,
+            state_key: Some("".to_owned()),
+            redacts: None,
+        },
+        sender_user,
+        &room_id,
+        &state_lock,
+    )?;

    // 4. Canonical room alias
    if let Some(room_alias_id) = &alias {
-        services()
-            .rooms
-            .timeline
-            .build_and_append_pdu(
-                PduBuilder {
-                    event_type: TimelineEventType::RoomCanonicalAlias,
-                    content: to_raw_value(&RoomCanonicalAliasEventContent {
-                        alias: Some(room_alias_id.to_owned()),
-                        alt_aliases: vec![],
-                    })
-                    .expect("We checked that alias earlier, it must be fine"),
-                    unsigned: None,
-                    state_key: Some("".to_owned()),
-                    redacts: None,
-                    timestamp: None,
-                },
-                sender_user,
-                &room_id,
-                &state_lock,
-            )
-            .await?;
+        services().rooms.timeline.build_and_append_pdu(
+            PduBuilder {
+                event_type: RoomEventType::RoomCanonicalAlias,
+                content: to_raw_value(&RoomCanonicalAliasEventContent {
+                    alias: Some(room_alias_id.to_owned()),
+                    alt_aliases: vec![],
+                })
+                .expect("We checked that alias earlier, it must be fine"),
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &room_id,
+            &state_lock,
+        )?;
    }

    // 5. Events set by preset

    // 5.1 Join Rules
-    services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomJoinRules,
-                content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
-                    RoomPreset::PublicChat => JoinRule::Public,
-                    // according to spec "invite" is the default
-                    _ => JoinRule::Invite,
-                }))
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &room_id,
-            &state_lock,
-        )
-        .await?;
+    services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomJoinRules,
+            content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
+                RoomPreset::PublicChat => JoinRule::Public,
+                // according to spec "invite" is the default
+                _ => JoinRule::Invite,
+            }))
+            .expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some("".to_owned()),
+            redacts: None,
+        },
+        sender_user,
+        &room_id,
+        &state_lock,
+    )?;

    // 5.2 History Visibility
-    services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomHistoryVisibility,
-                content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
-                    HistoryVisibility::Shared,
-                ))
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &room_id,
-            &state_lock,
-        )
-        .await?;
+    services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomHistoryVisibility,
+            content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
+                HistoryVisibility::Shared,
+            ))
+            .expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some("".to_owned()),
+            redacts: None,
+        },
+        sender_user,
+        &room_id,
+        &state_lock,
+    )?;

    // 5.3 Guest Access
-    services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomGuestAccess,
-                content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
-                    RoomPreset::PublicChat => GuestAccess::Forbidden,
-                    _ => GuestAccess::CanJoin,
-                }))
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &room_id,
-            &state_lock,
-        )
-        .await?;
+    services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomGuestAccess,
+            content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
+                RoomPreset::PublicChat => GuestAccess::Forbidden,
+                _ => GuestAccess::CanJoin,
+            }))
+            .expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some("".to_owned()),
+            redacts: None,
+        },
+        sender_user,
+        &room_id,
+        &state_lock,
+    )?;

    // 6. Events listed in initial_state
    for event in &body.initial_state {
@@ -428,76 +346,64 @@ pub async fn create_room_route(
        pdu_builder.state_key.get_or_insert_with(|| "".to_owned());

        // Silently skip encryption events if they are not allowed
-        if pdu_builder.event_type == TimelineEventType::RoomEncryption
+        if pdu_builder.event_type == RoomEventType::RoomEncryption
            && !services().globals.allow_encryption()
        {
            continue;
        }

-        services()
-            .rooms
-            .timeline
-            .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
-            .await?;
+        services().rooms.timeline.build_and_append_pdu(
+            pdu_builder,
+            sender_user,
+            &room_id,
+            &state_lock,
+        )?;
    }

    // 7. Events implied by name and topic
    if let Some(name) = &body.name {
-        services()
-            .rooms
-            .timeline
-            .build_and_append_pdu(
-                PduBuilder {
-                    event_type: TimelineEventType::RoomName,
-                    content: to_raw_value(&RoomNameEventContent::new(name.clone()))
-                        .expect("event is valid, we just created it"),
-                    unsigned: None,
-                    state_key: Some("".to_owned()),
-                    redacts: None,
-                    timestamp: None,
-                },
-                sender_user,
-                &room_id,
-                &state_lock,
-            )
-            .await?;
+        services().rooms.timeline.build_and_append_pdu(
+            PduBuilder {
+                event_type: RoomEventType::RoomName,
+                content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
+                    .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &room_id,
+            &state_lock,
+        )?;
    }

    if let Some(topic) = &body.topic {
-        services()
-            .rooms
-            .timeline
-            .build_and_append_pdu(
-                PduBuilder {
-                    event_type: TimelineEventType::RoomTopic,
-                    content: to_raw_value(&RoomTopicEventContent {
-                        topic: topic.clone(),
-                    })
-                    .expect("event is valid, we just created it"),
-                    unsigned: None,
-                    state_key: Some("".to_owned()),
-                    redacts: None,
-                    timestamp: None,
-                },
-                sender_user,
-                &room_id,
-                &state_lock,
-            )
-            .await?;
+        services().rooms.timeline.build_and_append_pdu(
+            PduBuilder {
+                event_type: RoomEventType::RoomTopic,
+                content: to_raw_value(&RoomTopicEventContent {
+                    topic: topic.clone(),
+                })
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &room_id,
+            &state_lock,
+        )?;
    }

    // 8. Events implied by invite (and TODO: invite_3pid)
    drop(state_lock);
    for user_id in &body.invite {
-        let _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await;
+        let _ = invite_helper(sender_user, user_id, &room_id, body.is_direct).await;
    }

    // Homeserver specific stuff
    if let Some(alias) = alias {
-        services()
-            .rooms
-            .alias
-            .set_alias(&alias, &room_id, sender_user)?;
+        services().rooms.alias.set_alias(&alias, &room_id)?;
    }

    if body.visibility == room::Visibility::Public {
@@ -519,31 +425,24 @@ pub async fn get_room_event_route(
 ) -> Result<get_room_event::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let event = services()
-        .rooms
-        .timeline
-        .get_pdu(&body.event_id)?
-        .ok_or_else(|| {
-            warn!("Event not found, event ID: {:?}", &body.event_id);
-            Error::BadRequest(ErrorKind::NotFound, "Event not found.")
-        })?;
-
-    if !services().rooms.state_accessor.user_can_see_event(
-        sender_user,
-        &event.room_id,
-        &body.event_id,
-    )? {
+    if !services()
+        .rooms
+        .state_cache
+        .is_joined(sender_user, &body.room_id)?
+    {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
-            "You don't have permission to view this event.",
+            ErrorKind::Forbidden,
+            "You don't have permission to view this room.",
        ));
    }

-    let mut event = (*event).clone();
-    event.add_age()?;
-
    Ok(get_room_event::v3::Response {
-        event: event.to_room_event(),
+        event: services()
+            .rooms
+            .timeline
+            .get_pdu(&body.event_id)?
+            .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?
+            .to_room_event(),
    })
 }
@@ -563,7 +462,7 @@ pub async fn get_room_aliases_route(
        .is_joined(sender_user, &body.room_id)?
    {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
            "You don't have permission to view this room.",
        ));
    }
@@ -616,7 +515,7 @@ pub async fn upgrade_room_route(
        .globals
        .roomid_mutex_state
        .write()
-        .await
+        .unwrap()
        .entry(body.room_id.clone())
        .or_default(),
    );
@@ -624,27 +523,22 @@ pub async fn upgrade_room_route(

    // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
    // Fail if the sender does not have the required permissions
-    let tombstone_event_id = services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomTombstone,
-                content: to_raw_value(&RoomTombstoneEventContent {
-                    body: "This room has been replaced".to_owned(),
-                    replacement_room: replacement_room.clone(),
-                })
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &body.room_id,
-            &state_lock,
-        )
-        .await?;
+    let tombstone_event_id = services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomTombstone,
+            content: to_raw_value(&RoomTombstoneEventContent {
+                body: "This room has been replaced".to_owned(),
+                replacement_room: replacement_room.clone(),
+            })
+            .expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some("".to_owned()),
+            redacts: None,
+        },
+        sender_user,
+        &body.room_id,
+        &state_lock,
+    )?;

    // Change lock to replacement room
    drop(state_lock);
@@ -653,7 +547,7 @@ pub async fn upgrade_room_route(
        .globals
        .roomid_mutex_state
        .write()
-        .await
+        .unwrap()
        .entry(replacement_room.clone())
        .or_default(),
    );
@@ -678,30 +572,12 @@ pub async fn upgrade_room_route(
    ));

    // Send a m.room.create event containing a predecessor field and the applicable room_version
-    match body.new_version {
-        RoomVersionId::V1
-        | RoomVersionId::V2
-        | RoomVersionId::V3
-        | RoomVersionId::V4
-        | RoomVersionId::V5
-        | RoomVersionId::V6
-        | RoomVersionId::V7
-        | RoomVersionId::V8
-        | RoomVersionId::V9
-        | RoomVersionId::V10 => {
-            create_event_content.insert(
-                "creator".into(),
-                json!(&sender_user).try_into().map_err(|_| {
-                    Error::BadRequest(ErrorKind::BadJson, "Error forming creation event")
-                })?,
-            );
-        }
-        RoomVersionId::V11 => {
-            // "creator" key no longer exists in V11 rooms
-            create_event_content.remove("creator");
-        }
-        _ => unreachable!("Validity of room version already checked"),
-    }
+    create_event_content.insert(
+        "creator".into(),
+        json!(&sender_user)
+            .try_into()
+            .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
+    );
    create_event_content.insert(
        "room_version".into(),
        json!(&body.new_version)
@@ -729,53 +605,43 @@ pub async fn upgrade_room_route(
        ));
    }

-    services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomCreate,
-                content: to_raw_value(&create_event_content)
-                    .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &replacement_room,
-            &state_lock,
-        )
-        .await?;
+    services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomCreate,
+            content: to_raw_value(&create_event_content)
+                .expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some("".to_owned()),
+            redacts: None,
+        },
+        sender_user,
+        &replacement_room,
+        &state_lock,
+    )?;

    // Join the new room
-    services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomMember,
-                content: to_raw_value(&RoomMemberEventContent {
-                    membership: MembershipState::Join,
-                    displayname: services().users.displayname(sender_user)?,
-                    avatar_url: services().users.avatar_url(sender_user)?,
-                    is_direct: None,
-                    third_party_invite: None,
-                    blurhash: services().users.blurhash(sender_user)?,
-                    reason: None,
-                    join_authorized_via_users_server: None,
-                })
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some(sender_user.to_string()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &replacement_room,
-            &state_lock,
-        )
-        .await?;
+    services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomMember,
+            content: to_raw_value(&RoomMemberEventContent {
+                membership: MembershipState::Join,
+                displayname: services().users.displayname(sender_user)?,
+                avatar_url: services().users.avatar_url(sender_user)?,
+                is_direct: None,
+                third_party_invite: None,
+                blurhash: services().users.blurhash(sender_user)?,
+                reason: None,
+                join_authorized_via_users_server: None,
+            })
+            .expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some(sender_user.to_string()),
+            redacts: None,
+        },
+        sender_user,
+        &replacement_room,
+        &state_lock,
+    )?;

    // Recommended transferable state events list from the specs
    let transferable_state_events = vec![
@@ -802,23 +668,18 @@ pub async fn upgrade_room_route(
            None => continue, // Skipping missing events.
        };

-        services()
-            .rooms
-            .timeline
-            .build_and_append_pdu(
-                PduBuilder {
-                    event_type: event_type.to_string().into(),
-                    content: event_content,
-                    unsigned: None,
-                    state_key: Some("".to_owned()),
-                    redacts: None,
-                    timestamp: None,
-                },
-                sender_user,
-                &replacement_room,
-                &state_lock,
-            )
-            .await?;
+        services().rooms.timeline.build_and_append_pdu(
+            PduBuilder {
+                event_type: event_type.to_string().into(),
+                content: event_content,
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &replacement_room,
+            &state_lock,
+        )?;
    }

    // Moves any local aliases to the new room
@@ -831,7 +692,7 @@ pub async fn upgrade_room_route(
        services()
            .rooms
            .alias
-            .set_alias(&alias, &replacement_room, sender_user)?;
+            .set_alias(&alias, &replacement_room)?;
    }

    // Get the old room power levels
@@ -852,24 +713,19 @@ pub async fn upgrade_room_route(
    power_levels_event_content.invite = new_level;

    // Modify the power levels in the old room to prevent sending of events and inviting new users
-    let _ = services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomPowerLevels,
-                content: to_raw_value(&power_levels_event_content)
-                    .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-                timestamp: None,
-            },
-            sender_user,
-            &body.room_id,
-            &state_lock,
-        )
-        .await?;
+    let _ = services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: RoomEventType::RoomPowerLevels,
+            content: to_raw_value(&power_levels_event_content)
+                .expect("event is valid, we just created it"),
+            unsigned: None,
+            state_key: Some("".to_owned()),
+            redacts: None,
+        },
+        sender_user,
+        &body.room_id,
+        &state_lock,
+    )?;

    drop(state_lock);
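Note on the recurring `roomid_mutex_state` pattern in the hunks above: both branches serialize all writes to a room by taking a per-room mutex out of a shared map, and differ only in whether that map is guarded by tokio's async `RwLock` (`.write().await`, left) or std's `RwLock` (`.write().unwrap()`, right). A minimal sketch of the async variant, with a `String` key standing in for the owned room-id type used in the real code:

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::{Mutex, RwLock};

/// Map from room id to that room's state mutex.
type RoomMutexMap = RwLock<HashMap<String, Arc<Mutex<()>>>>;

/// Fetch (or lazily create) the per-room mutex. Cloning the Arc lets the
/// caller drop the map's write lock before awaiting the room mutex itself.
async fn room_mutex(map: &RoomMutexMap, room_id: &str) -> Arc<Mutex<()>> {
    map.write()
        .await
        .entry(room_id.to_owned())
        .or_default()
        .clone()
}

#[tokio::main]
async fn main() {
    let map = RoomMutexMap::default();
    let mutex = room_mutex(&map, "!room:example.org").await;
    let _state_lock = mutex.lock().await; // held while appending PDUs
}
```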
@@ -31,8 +31,7 @@ pub async fn search_events_route(
            .collect()
    });

-    // Use limit or else 10, with maximum 100
-    let limit = filter.limit.map_or(10, u64::from).min(100) as usize;
+    let limit = filter.limit.map_or(10, |l| u64::from(l) as usize);

    let mut searches = Vec::new();
@@ -43,7 +42,7 @@ pub async fn search_events_route(
        .is_joined(sender_user, &room_id)?
    {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
            "You don't have permission to view this room.",
        ));
    }
@@ -82,22 +81,6 @@ pub async fn search_events_route(

    let results: Vec<_> = results
        .iter()
-        .filter_map(|result| {
-            services()
-                .rooms
-                .timeline
-                .get_pdu_from_id(result)
-                .ok()?
-                .filter(|pdu| {
-                    !pdu.is_redacted()
-                        && services()
-                            .rooms
-                            .state_accessor
-                            .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
-                            .unwrap_or(false)
-                })
-                .map(|pdu| pdu.to_room_event())
-        })
        .map(|result| {
            Ok::<_, Error>(SearchResult {
                context: EventContextResult {
@@ -108,7 +91,11 @@ pub async fn search_events_route(
                    start: None,
                },
                rank: None,
-                result: Some(result),
+                result: services()
+                    .rooms
+                    .timeline
+                    .get_pdu_from_id(result)?
+                    .map(|pdu| pdu.to_room_event()),
            })
        })
        .filter_map(|r| r.ok())
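The substantive change here outside the visibility filter is the limit computation: the left side defaults to 10 and caps the client-supplied value at 100, while the right side applies no cap. A sketch of the clamped variant, assuming the limit arrives as an optional `u32`:

```rust
fn effective_limit(requested: Option<u32>) -> usize {
    // Default to 10 results; hard-cap at 100 regardless of the request.
    requested.map_or(10, u64::from).min(100) as usize
}

fn main() {
    assert_eq!(effective_limit(None), 10);
    assert_eq!(effective_limit(Some(5_000)), 100);
}
```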
@@ -9,7 +9,7 @@ use ruma::{
    UserId,
 };
 use serde::Deserialize;
-use tracing::{info, warn};
+use tracing::info;

 #[derive(Debug, Deserialize)]
 struct Claims {
@@ -26,7 +26,6 @@ pub async fn get_login_types_route(
 ) -> Result<get_login_types::v3::Response> {
    Ok(get_login_types::v3::Response::new(vec![
        get_login_types::v3::LoginType::Password(Default::default()),
-        get_login_types::v3::LoginType::ApplicationService(Default::default()),
    ]))
 }
@@ -42,43 +41,28 @@ pub async fn get_login_types_route(
 /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
 /// supported login types.
 pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
-    // To allow deprecated login methods
-    #![allow(deprecated)]
    // Validate login method
    // TODO: Other login methods
    let user_id = match &body.login_info {
        login::v3::LoginInfo::Password(login::v3::Password {
            identifier,
            password,
-            user,
-            address: _,
-            medium: _,
        }) => {
-            let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
-                UserId::parse_with_server_name(
-                    user_id.to_lowercase(),
-                    services().globals.server_name(),
-                )
-            } else if let Some(user) = user {
-                UserId::parse(user)
-            } else {
-                warn!("Bad login type: {:?}", &body.login_info);
-                return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
-            }
-            .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
-
-            if services().appservice.is_exclusive_user_id(&user_id).await {
-                return Err(Error::BadRequest(
-                    ErrorKind::Exclusive,
-                    "User id reserved by appservice.",
-                ));
-            }
-
+            let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
+                user_id.to_lowercase()
+            } else {
+                return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
+            };
+            let user_id =
+                UserId::parse_with_server_name(username, services().globals.server_name())
+                    .map_err(|_| {
+                        Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
+                    })?;
            let hash = services()
                .users
                .password_hash(&user_id)?
                .ok_or(Error::BadRequest(
-                    ErrorKind::forbidden(),
+                    ErrorKind::Forbidden,
                    "Wrong username or password.",
                ))?;
@@ -93,7 +77,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re

            if !hash_matches {
                return Err(Error::BadRequest(
-                    ErrorKind::forbidden(),
+                    ErrorKind::Forbidden,
                    "Wrong username or password.",
                ));
            }
@@ -109,20 +93,9 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
            )
            .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
            let username = token.claims.sub.to_lowercase();
-            let user_id =
-                UserId::parse_with_server_name(username, services().globals.server_name())
-                    .map_err(|_| {
-                        Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
-                    })?;
-
-            if services().appservice.is_exclusive_user_id(&user_id).await {
-                return Err(Error::BadRequest(
-                    ErrorKind::Exclusive,
-                    "User id reserved by appservice.",
-                ));
-            }
-
-            user_id
+            UserId::parse_with_server_name(username, services().globals.server_name()).map_err(
+                |_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
+            )?
        } else {
            return Err(Error::BadRequest(
                ErrorKind::Unknown,
@@ -130,41 +103,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
            ));
        }
    }
-        login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
-            identifier,
-            user,
-        }) => {
-            let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
-                UserId::parse_with_server_name(
-                    user_id.to_lowercase(),
-                    services().globals.server_name(),
-                )
-            } else if let Some(user) = user {
-                UserId::parse(user)
-            } else {
-                warn!("Bad login type: {:?}", &body.login_info);
-                return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
-            }
-            .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
-
-            if let Some(ref info) = body.appservice_info {
-                if !info.is_user_match(&user_id) {
-                    return Err(Error::BadRequest(
-                        ErrorKind::Exclusive,
-                        "User is not in namespace.",
-                    ));
-                }
-            } else {
-                return Err(Error::BadRequest(
-                    ErrorKind::MissingToken,
-                    "Missing appservice token.",
-                ));
-            }
-
-            user_id
-        }
        _ => {
-            warn!("Unsupported or unknown login type: {:?}", &body.login_info);
            return Err(Error::BadRequest(
                ErrorKind::Unknown,
                "Unsupported login type.",
@@ -202,8 +141,6 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re

    info!("{} logged in", user_id);

-    // Homeservers are still required to send the `home_server` field
-    #[allow(deprecated)]
    Ok(login::v3::Response {
        user_id,
        access_token: token,
@@ -227,15 +164,6 @@ pub async fn logout_route(body: Ruma<logout::v3::Request>) -> Result<logout::v3:
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

-    if let Some(ref info) = body.appservice_info {
-        if !info.is_user_match(sender_user) {
-            return Err(Error::BadRequest(
-                ErrorKind::Exclusive,
-                "User is not in namespace.",
-            ));
-        }
-    }
-
    services().users.remove_device(sender_user, sender_device)?;

    Ok(logout::v3::Response::new())
@@ -257,20 +185,6 @@ pub async fn logout_all_route(
 ) -> Result<logout_all::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    if let Some(ref info) = body.appservice_info {
-        if !info.is_user_match(sender_user) {
-            return Err(Error::BadRequest(
-                ErrorKind::Exclusive,
-                "User is not in namespace.",
-            ));
-        }
-    } else {
-        return Err(Error::BadRequest(
-            ErrorKind::MissingToken,
-            "Missing appservice token.",
-        ));
-    }
-
    for device_id in services().users.all_device_ids(sender_user).flatten() {
        services().users.remove_device(sender_user, &device_id)?;
    }
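Both login branches funnel the client-supplied identifier through the same normalization before any password check: lowercase the localpart, then qualify it with this server's name. A sketch using the ruma API visible above (`UserId::parse_with_server_name` is the real call; the helper name is illustrative):

```rust
use ruma::{IdParseError, OwnedUserId, ServerName, UserId};

/// Lowercase a client-supplied identifier and qualify it with this server's
/// name. `parse_with_server_name` accepts either a bare localpart or a full
/// `@user:server` id.
fn normalize_user_id(input: &str, server: &ServerName) -> Result<OwnedUserId, IdParseError> {
    UserId::parse_with_server_name(input.to_lowercase(), server)
}
```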
@@ -1,34 +0,0 @@
-use crate::{services, Result, Ruma};
-use ruma::api::client::space::get_hierarchy;
-
-/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy``
-///
-/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space.
-pub async fn get_hierarchy_route(
-    body: Ruma<get_hierarchy::v1::Request>,
-) -> Result<get_hierarchy::v1::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-
-    let skip = body
-        .from
-        .as_ref()
-        .and_then(|s| s.parse::<usize>().ok())
-        .unwrap_or(0);
-
-    let limit = body.limit.map_or(10, u64::from).min(100) as usize;
-
-    let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself
-
-    services()
-        .rooms
-        .spaces
-        .get_hierarchy(
-            sender_user,
-            &body.room_id,
-            limit,
-            skip,
-            max_depth,
-            body.suggested_only,
-        )
-        .await
-}
@@ -7,12 +7,15 @@ use ruma::{
        state::{get_state_events, get_state_events_for_key, send_state_event},
    },
    events::{
-        room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
+        room::{
+            canonical_alias::RoomCanonicalAliasEventContent,
+            history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
+        },
+        AnyStateEventContent, StateEventType,
    },
    serde::Raw,
-    EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId,
+    EventId, RoomId, UserId,
 };
-use tracing::log::warn;

 /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`
 ///
@@ -32,11 +35,6 @@ pub async fn send_state_event_for_key_route(
        &body.event_type,
        &body.body.body, // Yes, I hate it too
        body.state_key.to_owned(),
-        if body.appservice_info.is_some() {
-            body.timestamp
-        } else {
-            None
-        },
    )
    .await?;
@@ -59,7 +57,7 @@ pub async fn send_state_event_for_empty_key_route(
    // Forbid m.room.encryption if encryption is disabled
    if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
            "Encryption has been disabled",
        ));
    }
@@ -70,11 +68,6 @@ pub async fn send_state_event_for_empty_key_route(
        &body.event_type.to_string().into(),
        &body.body.body,
        body.state_key.to_owned(),
-        if body.appservice_info.is_some() {
-            body.timestamp
-        } else {
-            None
-        },
    )
    .await?;
@@ -92,13 +85,32 @@ pub async fn get_state_events_route(
 ) -> Result<get_state_events::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

+    #[allow(clippy::blocks_in_if_conditions)]
+    // Users not in the room should not be able to access the state unless history_visibility is
+    // WorldReadable
    if !services()
        .rooms
-        .state_accessor
-        .user_can_see_state_events(sender_user, &body.room_id)?
+        .state_cache
+        .is_joined(sender_user, &body.room_id)?
+        && !matches!(
+            services()
+                .rooms
+                .state_accessor
+                .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
+                .map(|event| {
+                    serde_json::from_str(event.content.get())
+                        .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
+                        .map_err(|_| {
+                            Error::bad_database(
+                                "Invalid room history visibility event in database.",
+                            )
+                        })
+                }),
+            Some(Ok(HistoryVisibility::WorldReadable))
+        )
    {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
            "You don't have permission to view the room state.",
        ));
    }
@@ -125,13 +137,32 @@ pub async fn get_state_events_for_key_route(
 ) -> Result<get_state_events_for_key::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

+    #[allow(clippy::blocks_in_if_conditions)]
+    // Users not in the room should not be able to access the state unless history_visibility is
+    // WorldReadable
    if !services()
        .rooms
-        .state_accessor
-        .user_can_see_state_events(sender_user, &body.room_id)?
+        .state_cache
+        .is_joined(sender_user, &body.room_id)?
+        && !matches!(
+            services()
+                .rooms
+                .state_accessor
+                .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
+                .map(|event| {
+                    serde_json::from_str(event.content.get())
+                        .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
+                        .map_err(|_| {
+                            Error::bad_database(
+                                "Invalid room history visibility event in database.",
+                            )
+                        })
+                }),
+            Some(Ok(HistoryVisibility::WorldReadable))
+        )
    {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
            "You don't have permission to view the room state.",
        ));
    }
@@ -140,13 +171,10 @@ pub async fn get_state_events_for_key_route(
        .rooms
        .state_accessor
        .room_state_get(&body.room_id, &body.event_type, &body.state_key)?
-        .ok_or_else(|| {
-            warn!(
-                "State event {:?} not found in room {:?}",
-                &body.event_type, &body.room_id
-            );
-            Error::BadRequest(ErrorKind::NotFound, "State event not found.")
-        })?;
+        .ok_or(Error::BadRequest(
+            ErrorKind::NotFound,
+            "State event not found.",
+        ))?;

    Ok(get_state_events_for_key::v3::Response {
        content: serde_json::from_str(event.content.get())
@@ -164,13 +192,32 @@ pub async fn get_state_events_for_empty_key_route(
 ) -> Result<RumaResponse<get_state_events_for_key::v3::Response>> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

+    #[allow(clippy::blocks_in_if_conditions)]
+    // Users not in the room should not be able to access the state unless history_visibility is
+    // WorldReadable
    if !services()
        .rooms
-        .state_accessor
-        .user_can_see_state_events(sender_user, &body.room_id)?
+        .state_cache
+        .is_joined(sender_user, &body.room_id)?
+        && !matches!(
+            services()
+                .rooms
+                .state_accessor
+                .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
+                .map(|event| {
+                    serde_json::from_str(event.content.get())
+                        .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
+                        .map_err(|_| {
+                            Error::bad_database(
+                                "Invalid room history visibility event in database.",
+                            )
+                        })
+                }),
+            Some(Ok(HistoryVisibility::WorldReadable))
+        )
    {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
            "You don't have permission to view the room state.",
        ));
    }
@@ -179,13 +226,10 @@ pub async fn get_state_events_for_empty_key_route(
        .rooms
        .state_accessor
        .room_state_get(&body.room_id, &body.event_type, "")?
-        .ok_or_else(|| {
-            warn!(
-                "State event {:?} not found in room {:?}",
-                &body.event_type, &body.room_id
-            );
-            Error::BadRequest(ErrorKind::NotFound, "State event not found.")
-        })?;
+        .ok_or(Error::BadRequest(
+            ErrorKind::NotFound,
+            "State event not found.",
+        ))?;

    Ok(get_state_events_for_key::v3::Response {
        content: serde_json::from_str(event.content.get())
@@ -200,7 +244,6 @@ async fn send_state_event_for_key_helper(
    event_type: &StateEventType,
    json: &Raw<AnyStateEventContent>,
    state_key: String,
-    timestamp: Option<MilliSecondsSinceUnixEpoch>,
 ) -> Result<Arc<EventId>> {
    let sender_user = sender;
@@ -225,7 +268,7 @@ async fn send_state_event_for_key_helper(
        .is_none()
    {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
            "You are only allowed to send canonical_alias \
            events when it's aliases already exists",
        ));
@@ -238,29 +281,24 @@ async fn send_state_event_for_key_helper(
        .globals
        .roomid_mutex_state
        .write()
-        .await
+        .unwrap()
        .entry(room_id.to_owned())
        .or_default(),
    );
    let state_lock = mutex_state.lock().await;

-    let event_id = services()
-        .rooms
-        .timeline
-        .build_and_append_pdu(
-            PduBuilder {
-                event_type: event_type.to_string().into(),
-                content: serde_json::from_str(json.json().get()).expect("content is valid json"),
-                unsigned: None,
-                state_key: Some(state_key),
-                redacts: None,
-                timestamp,
-            },
-            sender_user,
-            room_id,
-            &state_lock,
-        )
-        .await?;
+    let event_id = services().rooms.timeline.build_and_append_pdu(
+        PduBuilder {
+            event_type: event_type.to_string().into(),
+            content: serde_json::from_str(json.json().get()).expect("content is valid json"),
+            unsigned: None,
+            state_key: Some(state_key),
+            redacts: None,
+        },
+        sender_user,
+        room_id,
+        &state_lock,
+    )?;

    Ok(event_id)
 }
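The right-hand branch inlines the access rule that the left-hand `user_can_see_state_events` helper encapsulates: room state is readable when the requester is joined, or when the room's `m.room.history_visibility` is `world_readable`. Reduced to plain types (the enum here is illustrative, not ruma's):

```rust
#[derive(PartialEq)]
enum HistoryVisibility {
    Invited,
    Joined,
    Shared,
    WorldReadable,
}

/// State is readable when the requester is joined, or when the room's
/// m.room.history_visibility is world_readable.
fn can_view_state(is_joined: bool, visibility: &HistoryVisibility) -> bool {
    is_joined || *visibility == HistoryVisibility::WorldReadable
}
```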
File diff suppressed because it is too large
@@ -1,49 +0,0 @@
-use ruma::api::client::{error::ErrorKind, threads::get_threads};
-
-use crate::{services, Error, Result, Ruma};
-
-/// # `GET /_matrix/client/r0/rooms/{roomId}/threads`
-pub async fn get_threads_route(
-    body: Ruma<get_threads::v1::Request>,
-) -> Result<get_threads::v1::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-
-    // Use limit or else 10, with maximum 100
-    let limit = body
-        .limit
-        .and_then(|l| l.try_into().ok())
-        .unwrap_or(10)
-        .min(100);
-
-    let from = if let Some(from) = &body.from {
-        from.parse()
-            .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, ""))?
-    } else {
-        u64::MAX
-    };
-
-    let threads = services()
-        .rooms
-        .threads
-        .threads_until(sender_user, &body.room_id, from, &body.include)?
-        .take(limit)
-        .filter_map(|r| r.ok())
-        .filter(|(_, pdu)| {
-            services()
-                .rooms
-                .state_accessor
-                .user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
-                .unwrap_or(false)
-        })
-        .collect::<Vec<_>>();
-
-    let next_batch = threads.last().map(|(count, _)| count.to_string());
-
-    Ok(get_threads::v1::Response {
-        chunk: threads
-            .into_iter()
-            .map(|(_, pdu)| pdu.to_room_event())
-            .collect(),
-        next_batch,
-    })
-}
@@ -1,3 +1,4 @@
+use ruma::events::ToDeviceEventType;
 use std::collections::BTreeMap;

 use crate::{services, Error, Result, Ruma};
@@ -41,7 +42,7 @@ pub async fn send_event_to_device_route(
                    serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(
                        DirectDeviceContent {
                            sender: sender_user.clone(),
-                            ev_type: body.event_type.clone(),
+                            ev_type: ToDeviceEventType::from(&*body.event_type),
                            message_id: count.to_string().into(),
                            messages,
                        },
@@ -59,7 +60,7 @@ pub async fn send_event_to_device_route(
                    sender_user,
                    target_user_id,
                    target_device_id,
-                    &body.event_type.to_string(),
+                    &body.event_type,
                    event.deserialize_as().map_err(|_| {
                        Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
                    })?,
@@ -72,7 +73,7 @@ pub async fn send_event_to_device_route(
                    sender_user,
                    target_user_id,
                    &target_device_id?,
-                    &body.event_type.to_string(),
+                    &body.event_type,
                    event.deserialize_as().map_err(|_| {
                        Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
                    })?,
@@ -17,29 +17,23 @@ pub async fn create_typing_event_route(
        .is_joined(sender_user, &body.room_id)?
    {
        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
            "You are not in this room.",
        ));
    }

    if let Typing::Yes(duration) = body.state {
-        services()
-            .rooms
-            .edus
-            .typing
-            .typing_add(
-                sender_user,
-                &body.room_id,
-                duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
-            )
-            .await?;
+        services().rooms.edus.typing.typing_add(
+            sender_user,
+            &body.room_id,
+            duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
+        )?;
    } else {
        services()
            .rooms
            .edus
            .typing
-            .typing_remove(sender_user, &body.room_id)
-            .await?;
+            .typing_remove(sender_user, &body.room_id)?;
    }

    Ok(create_typing_event::v3::Response {})
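Both sides store typing notifications with an absolute expiry computed the same way; only the call shape and the async `.await?` differ. A self-contained sketch of that timestamp arithmetic (`typing_timeout_ms` is an illustrative name):

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Typing notifications carry an absolute expiry in milliseconds since the
/// Unix epoch, computed from the client-supplied duration as in the hunk above.
fn typing_timeout_ms(duration: Duration) -> u64 {
    let now_ms = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is after the Unix epoch")
        .as_millis() as u64;
    duration.as_millis() as u64 + now_ms
}
```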
@@ -23,13 +23,10 @@ pub async fn get_supported_versions_route(
            "r0.6.0".to_owned(),
            "v1.1".to_owned(),
            "v1.2".to_owned(),
-            "v1.3".to_owned(),
-            "v1.4".to_owned(),
-            "v1.5".to_owned(),
        ],
        unstable_features: BTreeMap::from_iter([
            ("org.matrix.e2e_cross_signing".to_owned(), true),
-            ("org.matrix.msc3916.stable".to_owned(), true),
+            ("org.matrix.msc2285.stable".to_owned(), true),
        ]),
    };
@@ -48,9 +48,6 @@ pub async fn search_users_route(
                 return None;
             }

-            // It's a matching user, but is the sender allowed to see them?
-            let mut user_visible = false;
-
             let user_is_in_public_rooms = services()
                 .rooms
                 .state_cache

@@ -72,26 +69,22 @@ pub async fn search_users_route(
                 });

             if user_is_in_public_rooms {
-                user_visible = true;
-            } else {
-                let user_is_in_shared_rooms = services()
-                    .rooms
-                    .user
-                    .get_shared_rooms(vec![sender_user.clone(), user_id])
-                    .ok()?
-                    .next()
-                    .is_some();
-
-                if user_is_in_shared_rooms {
-                    user_visible = true;
-                }
+                return Some(user);
             }

-            if !user_visible {
-                return None;
+            let user_is_in_shared_rooms = services()
+                .rooms
+                .user
+                .get_shared_rooms(vec![sender_user.clone(), user_id])
+                .ok()?
+                .next()
+                .is_some();
+
+            if user_is_in_shared_rooms {
+                return Some(user);
             }

-            Some(user)
+            None
         });

     let results = users.by_ref().take(limit).collect();
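
Note: the two sides differ only in control flow — a mutable `user_visible` flag versus early returns inside the `filter_map` closure. A self-contained sketch of the early-return shape (names are illustrative, not Conduit's API):

fn visible_users<'a>(
    users: impl Iterator<Item = &'a str> + 'a,
    is_public: impl Fn(&str) -> bool + 'a,
    shares_room: impl Fn(&str) -> bool + 'a,
) -> impl Iterator<Item = &'a str> + 'a {
    users.filter_map(move |user| {
        // Visible if the user is in any public room...
        if is_public(user) {
            return Some(user);
        }
        // ...or shares at least one room with the searcher.
        if shares_room(user) {
            return Some(user);
        }
        None
    })
}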
@@ -1,5 +1,4 @@
 use crate::{services, Result, Ruma};
-use base64::{engine::general_purpose, Engine as _};
 use hmac::{Hmac, Mac};
 use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch};
 use sha1::Sha1;

@@ -29,7 +28,7 @@ pub async fn turn_server_route(
         .expect("HMAC can take key of any size");
     mac.update(username.as_bytes());

-    let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes());
+    let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);

     (username, password)
 } else {
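
Note: the only functional difference here is the base64 call site — base64 0.21 replaced the free functions with `Engine` methods. A sketch of the same TURN REST API credential derivation under both forms (assuming the hmac, sha1, and base64 crates; the secret is a placeholder):

use hmac::{Hmac, Mac};
use sha1::Sha1;

type HmacSha1 = Hmac<Sha1>;

fn turn_credentials(username: &str, secret: &[u8]) -> (String, String) {
    let mut mac = HmacSha1::new_from_slice(secret).expect("HMAC can take key of any size");
    mac.update(username.as_bytes());
    let digest = mac.finalize().into_bytes();

    // base64 0.21 API (matches the removed line):
    use base64::{engine::general_purpose, Engine as _};
    let password = general_purpose::STANDARD.encode(digest);
    // base64 0.13 equivalent (matches the added line):
    // let password = base64::encode_config(digest, base64::STANDARD);

    (username.to_owned(), password)
}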
@@ -1,22 +0,0 @@
-use ruma::api::client::discovery::discover_homeserver::{
-    self, HomeserverInfo, SlidingSyncProxyInfo,
-};
-
-use crate::{services, Result, Ruma};
-
-/// # `GET /.well-known/matrix/client`
-///
-/// Returns the client server discovery information.
-pub async fn well_known_client(
-    _body: Ruma<discover_homeserver::Request>,
-) -> Result<discover_homeserver::Response> {
-    let client_url = services().globals.well_known_client();
-
-    Ok(discover_homeserver::Response {
-        homeserver: HomeserverInfo {
-            base_url: client_url.clone(),
-        },
-        identity_server: None,
-        sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }),
-    })
-}
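
Note: for reference, the handler deleted above served a discovery document of roughly this shape — a sketch using `serde_json::json!`; the key names follow the Matrix client discovery spec and MSC3575 as I understand them, and the URL is a placeholder:

use serde_json::json;

fn well_known_client_body(client_url: &str) -> serde_json::Value {
    json!({
        "m.homeserver": { "base_url": client_url },
        "org.matrix.msc3575.proxy": { "url": client_url },
    })
}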
@@ -2,72 +2,52 @@ use std::{collections::BTreeMap, iter::FromIterator, str};

 use axum::{
     async_trait,
-    body::Body,
-    extract::{FromRequest, Path},
+    body::{Full, HttpBody},
+    extract::{
+        rejection::TypedHeaderRejectionReason, FromRequest, Path, RequestParts, TypedHeader,
+    },
+    headers::{
+        authorization::{Bearer, Credentials},
+        Authorization,
+    },
     response::{IntoResponse, Response},
-    RequestExt, RequestPartsExt,
+    BoxError,
 };
-use axum_extra::{
-    headers::{authorization::Bearer, Authorization},
-    typed_header::TypedHeaderRejectionReason,
-    TypedHeader,
-};
-use bytes::{BufMut, BytesMut};
-use http::{Request, StatusCode};
+use bytes::{BufMut, Bytes, BytesMut};
+use http::StatusCode;
 use ruma::{
     api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
-    server_util::authorization::XMatrix,
-    CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId,
+    CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId,
 };
 use serde::Deserialize;
 use tracing::{debug, error, warn};

 use super::{Ruma, RumaResponse};
-use crate::{service::appservice::RegistrationInfo, services, Error, Result};
+use crate::{services, Error, Result};

-enum Token {
-    Appservice(Box<RegistrationInfo>),
-    User((OwnedUserId, OwnedDeviceId)),
-    Invalid,
-    None,
-}
-
 #[async_trait]
-impl<T, S> FromRequest<S> for Ruma<T>
+impl<T, B> FromRequest<B> for Ruma<T>
 where
     T: IncomingRequest,
+    B: HttpBody + Send,
+    B::Data: Send,
+    B::Error: Into<BoxError>,
 {
     type Rejection = Error;

-    async fn from_request(req: Request<Body>, _state: &S) -> Result<Self, Self::Rejection> {
+    async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
         #[derive(Deserialize)]
         struct QueryParams {
             access_token: Option<String>,
             user_id: Option<String>,
         }

-        let (mut parts, mut body) = {
-            let limited_req = req.with_limited_body();
-            let (parts, body) = limited_req.into_parts();
-            let body = axum::body::to_bytes(
-                body,
-                services()
-                    .globals
-                    .max_request_size()
-                    .try_into()
-                    .unwrap_or(usize::MAX),
-            )
-            .await
-            .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
-            (parts, body)
-        };
-
         let metadata = T::METADATA;
-        let auth_header: Option<TypedHeader<Authorization<Bearer>>> = parts.extract().await?;
-        let path_params: Path<Vec<String>> = parts.extract().await?;
+        let auth_header = Option::<TypedHeader<Authorization<Bearer>>>::from_request(req).await?;
+        let path_params = Path::<Vec<String>>::from_request(req).await?;

-        let query = parts.uri.query().unwrap_or_default();
-        let query_params: QueryParams = match serde_html_form::from_str(query) {
+        let query = req.uri().query().unwrap_or_default();
+        let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) {
             Ok(params) => params,
             Err(e) => {
                 error!(%query, "Failed to deserialize query parameters: {}", e);

@@ -83,230 +63,188 @@ where
             None => query_params.access_token.as_deref(),
         };

-        let token = if let Some(token) = token {
-            if let Some(reg_info) = services().appservice.find_from_token(token).await {
-                Token::Appservice(Box::new(reg_info.clone()))
-            } else if let Some((user_id, device_id)) = services().users.find_from_token(token)? {
-                Token::User((user_id, OwnedDeviceId::from(device_id)))
-            } else {
-                Token::Invalid
-            }
-        } else {
-            Token::None
-        };
+        let mut body = Bytes::from_request(req)
+            .await
+            .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;

         let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();

-        let (sender_user, sender_device, sender_servername, appservice_info) =
-            match (metadata.authentication, token) {
-                (_, Token::Invalid) => {
-                    // OpenID endpoint uses a query param with the same name, drop this once query params for user auth are removed from the spec
-                    if query_params.access_token.is_some() {
-                        (None, None, None, None)
-                    } else {
-                        return Err(Error::BadRequest(
-                            ErrorKind::UnknownToken { soft_logout: false },
-                            "Unknown access token.",
-                        ));
-                    }
-                }
-                (AuthScheme::AccessToken, Token::Appservice(info)) => {
-                    let user_id = query_params
-                        .user_id
-                        .map_or_else(
-                            || {
-                                UserId::parse_with_server_name(
-                                    info.registration.sender_localpart.as_str(),
-                                    services().globals.server_name(),
-                                )
-                            },
-                            UserId::parse,
-                        )
-                        .map_err(|_| {
-                            Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
-                        })?;
-
-                    if !info.is_user_match(&user_id) {
-                        return Err(Error::BadRequest(
-                            ErrorKind::Exclusive,
-                            "User is not in namespace.",
-                        ));
-                    }
-
-                    if !services().users.exists(&user_id)? {
-                        return Err(Error::BadRequest(
-                            ErrorKind::forbidden(),
-                            "User does not exist.",
-                        ));
-                    }
-
-                    (Some(user_id), None, None, Some(*info))
-                }
-                (
-                    AuthScheme::None
-                    | AuthScheme::AppserviceToken
-                    | AuthScheme::AccessTokenOptional,
-                    Token::Appservice(info),
-                ) => (None, None, None, Some(*info)),
-                (AuthScheme::AccessToken, Token::None) => {
-                    return Err(Error::BadRequest(
-                        ErrorKind::MissingToken,
-                        "Missing access token.",
-                    ));
-                }
-                (
-                    AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None,
-                    Token::User((user_id, device_id)),
-                ) => (Some(user_id), Some(device_id), None, None),
-                (AuthScheme::ServerSignatures, Token::None) => {
-                    let TypedHeader(Authorization(x_matrix)) = parts
-                        .extract::<TypedHeader<Authorization<XMatrix>>>()
-                        .await
-                        .map_err(|e| {
-                            warn!("Missing or invalid Authorization header: {}", e);
-
-                            let msg = match e.reason() {
-                                TypedHeaderRejectionReason::Missing => {
-                                    "Missing Authorization header."
-                                }
-                                TypedHeaderRejectionReason::Error(_) => {
-                                    "Invalid X-Matrix signatures."
-                                }
-                                _ => "Unknown header-related error",
-                            };
-
-                            Error::BadRequest(ErrorKind::forbidden(), msg)
-                        })?;
-
-                    if let Some(dest) = x_matrix.destination {
-                        if dest != services().globals.server_name() {
-                            return Err(Error::BadRequest(
-                                ErrorKind::Unauthorized,
-                                "X-Matrix destination field does not match server name.",
-                            ));
-                        }
-                    };
-
-                    let origin_signatures = BTreeMap::from_iter([(
-                        x_matrix.key.clone(),
-                        CanonicalJsonValue::String(x_matrix.sig.to_string()),
-                    )]);
-
-                    let signatures = BTreeMap::from_iter([(
-                        x_matrix.origin.as_str().to_owned(),
-                        CanonicalJsonValue::Object(
-                            origin_signatures
-                                .into_iter()
-                                .map(|(k, v)| (k.to_string(), v))
-                                .collect(),
-                        ),
-                    )]);
-
-                    let mut request_map = BTreeMap::from_iter([
-                        (
-                            "method".to_owned(),
-                            CanonicalJsonValue::String(parts.method.to_string()),
-                        ),
-                        (
-                            "uri".to_owned(),
-                            CanonicalJsonValue::String(parts.uri.to_string()),
-                        ),
-                        (
-                            "origin".to_owned(),
-                            CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()),
-                        ),
-                        (
-                            "destination".to_owned(),
-                            CanonicalJsonValue::String(
-                                services().globals.server_name().as_str().to_owned(),
-                            ),
-                        ),
-                        (
-                            "signatures".to_owned(),
-                            CanonicalJsonValue::Object(signatures),
-                        ),
-                    ]);
-
-                    if let Some(json_body) = &json_body {
-                        request_map.insert("content".to_owned(), json_body.clone());
-                    };
-
-                    let keys_result = services()
-                        .rooms
-                        .event_handler
-                        .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false)
-                        .await;
-
-                    let keys = match keys_result {
-                        Ok(b) => b,
-                        Err(e) => {
-                            warn!("Failed to fetch signing keys: {}", e);
-                            return Err(Error::BadRequest(
-                                ErrorKind::forbidden(),
-                                "Failed to fetch signing keys.",
-                            ));
-                        }
-                    };
-
-                    // Only verify_keys that are currently valid should be used for validating requests
-                    // as per MSC4029
-                    let pub_key_map = BTreeMap::from_iter([(
-                        x_matrix.origin.as_str().to_owned(),
-                        if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
-                            keys.verify_keys
-                                .into_iter()
-                                .map(|(id, key)| (id, key.key))
-                                .collect()
-                        } else {
-                            BTreeMap::new()
-                        },
-                    )]);
-
-                    match ruma::signatures::verify_json(&pub_key_map, &request_map) {
-                        Ok(()) => (None, None, Some(x_matrix.origin), None),
-                        Err(e) => {
-                            warn!(
-                                "Failed to verify json request from {}: {}\n{:?}",
-                                x_matrix.origin, e, request_map
-                            );
-
-                            if parts.uri.to_string().contains('@') {
-                                warn!(
-                                    "Request uri contained '@' character. Make sure your \
-                                    reverse proxy gives Conduit the raw uri (apache: use \
-                                    nocanon)"
-                                );
-                            }
-
-                            return Err(Error::BadRequest(
-                                ErrorKind::forbidden(),
-                                "Failed to verify X-Matrix signatures.",
-                            ));
-                        }
-                    }
-                }
-                (
-                    AuthScheme::None
-                    | AuthScheme::AppserviceToken
-                    | AuthScheme::AccessTokenOptional,
-                    Token::None,
-                ) => (None, None, None, None),
-                (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => {
-                    return Err(Error::BadRequest(
-                        ErrorKind::Unauthorized,
-                        "Only server signatures should be used on this endpoint.",
-                    ));
-                }
-                (AuthScheme::AppserviceToken, Token::User(_)) => {
-                    return Err(Error::BadRequest(
-                        ErrorKind::Unauthorized,
-                        "Only appservice access tokens should be used on this endpoint.",
-                    ));
-                }
-            };
+        let appservices = services().appservice.all().unwrap();
+        let appservice_registration = appservices.iter().find(|(_id, registration)| {
+            registration
+                .get("as_token")
+                .and_then(|as_token| as_token.as_str())
+                .map_or(false, |as_token| token == Some(as_token))
+        });
+
+        let (sender_user, sender_device, sender_servername, from_appservice) =
+            if let Some((_id, registration)) = appservice_registration {
+                match metadata.authentication {
+                    AuthScheme::AccessToken => {
+                        let user_id = query_params.user_id.map_or_else(
+                            || {
+                                UserId::parse_with_server_name(
+                                    registration
+                                        .get("sender_localpart")
+                                        .unwrap()
+                                        .as_str()
+                                        .unwrap(),
+                                    services().globals.server_name(),
+                                )
+                                .unwrap()
+                            },
+                            |s| UserId::parse(s).unwrap(),
+                        );
+
+                        if !services().users.exists(&user_id).unwrap() {
+                            return Err(Error::BadRequest(
+                                ErrorKind::Forbidden,
+                                "User does not exist.",
+                            ));
+                        }
+
+                        // TODO: Check if appservice is allowed to be that user
+                        (Some(user_id), None, None, true)
+                    }
+                    AuthScheme::ServerSignatures => (None, None, None, true),
+                    AuthScheme::None => (None, None, None, true),
+                }
+            } else {
+                match metadata.authentication {
+                    AuthScheme::AccessToken => {
+                        let token = match token {
+                            Some(token) => token,
+                            _ => {
+                                return Err(Error::BadRequest(
+                                    ErrorKind::MissingToken,
+                                    "Missing access token.",
+                                ))
+                            }
+                        };
+
+                        match services().users.find_from_token(token).unwrap() {
+                            None => {
+                                return Err(Error::BadRequest(
+                                    ErrorKind::UnknownToken { soft_logout: false },
+                                    "Unknown access token.",
+                                ))
+                            }
+                            Some((user_id, device_id)) => (
+                                Some(user_id),
+                                Some(OwnedDeviceId::from(device_id)),
+                                None,
+                                false,
+                            ),
+                        }
+                    }
+                    AuthScheme::ServerSignatures => {
+                        let TypedHeader(Authorization(x_matrix)) =
+                            TypedHeader::<Authorization<XMatrix>>::from_request(req)
+                                .await
+                                .map_err(|e| {
+                                    warn!("Missing or invalid Authorization header: {}", e);
+
+                                    let msg = match e.reason() {
+                                        TypedHeaderRejectionReason::Missing => {
+                                            "Missing Authorization header."
+                                        }
+                                        TypedHeaderRejectionReason::Error(_) => {
+                                            "Invalid X-Matrix signatures."
+                                        }
+                                        _ => "Unknown header-related error",
+                                    };
+
+                                    Error::BadRequest(ErrorKind::Forbidden, msg)
+                                })?;
+
+                        let origin_signatures = BTreeMap::from_iter([(
+                            x_matrix.key.clone(),
+                            CanonicalJsonValue::String(x_matrix.sig),
+                        )]);
+
+                        let signatures = BTreeMap::from_iter([(
+                            x_matrix.origin.as_str().to_owned(),
+                            CanonicalJsonValue::Object(origin_signatures),
+                        )]);
+
+                        let mut request_map = BTreeMap::from_iter([
+                            (
+                                "method".to_owned(),
+                                CanonicalJsonValue::String(req.method().to_string()),
+                            ),
+                            (
+                                "uri".to_owned(),
+                                CanonicalJsonValue::String(req.uri().to_string()),
+                            ),
+                            (
+                                "origin".to_owned(),
+                                CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()),
+                            ),
+                            (
+                                "destination".to_owned(),
+                                CanonicalJsonValue::String(
+                                    services().globals.server_name().as_str().to_owned(),
+                                ),
+                            ),
+                            (
+                                "signatures".to_owned(),
+                                CanonicalJsonValue::Object(signatures),
+                            ),
+                        ]);
+
+                        if let Some(json_body) = &json_body {
+                            request_map.insert("content".to_owned(), json_body.clone());
+                        };
+
+                        let keys_result = services()
+                            .rooms
+                            .event_handler
+                            .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
+                            .await;
+
+                        let keys = match keys_result {
+                            Ok(b) => b,
+                            Err(e) => {
+                                warn!("Failed to fetch signing keys: {}", e);
+                                return Err(Error::BadRequest(
+                                    ErrorKind::Forbidden,
+                                    "Failed to fetch signing keys.",
+                                ));
+                            }
+                        };
+
+                        let pub_key_map =
+                            BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
+
+                        match ruma::signatures::verify_json(&pub_key_map, &request_map) {
+                            Ok(()) => (None, None, Some(x_matrix.origin), false),
+                            Err(e) => {
+                                warn!(
+                                    "Failed to verify json request from {}: {}\n{:?}",
+                                    x_matrix.origin, e, request_map
+                                );
+
+                                if req.uri().to_string().contains('@') {
+                                    warn!(
+                                        "Request uri contained '@' character. Make sure your \
+                                        reverse proxy gives Conduit the raw uri (apache: use \
+                                        nocanon)"
+                                    );
+                                }
+
+                                return Err(Error::BadRequest(
+                                    ErrorKind::Forbidden,
+                                    "Failed to verify X-Matrix signatures.",
+                                ));
+                            }
+                        }
+                    }
+                    AuthScheme::None => (None, None, None, false),
+                }
+            };

-        let mut http_request = Request::builder().uri(parts.uri).method(parts.method);
-        *http_request.headers_mut().unwrap() = parts.headers;
+        let mut http_request = http::Request::builder().uri(req.uri()).method(req.method());
+        *http_request.headers_mut().unwrap() = req.headers().clone();

         if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
             let user_id = sender_user.clone().unwrap_or_else(|| {

@@ -343,8 +281,7 @@ where
         debug!("{:?}", http_request);

         let body = T::try_from_http_request(http_request, &path_params).map_err(|e| {
-            warn!("try_from_http_request failed: {:?}", e);
-            debug!("JSON body: {:?}", json_body);
+            warn!("{:?}\n{:?}", e, json_body);
             Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.")
         })?;

@@ -353,16 +290,73 @@ where
             sender_user,
             sender_device,
             sender_servername,
-            appservice_info,
+            from_appservice,
             json_body,
         })
     }
 }

+struct XMatrix {
+    origin: OwnedServerName,
+    key: String, // KeyName?
+    sig: String,
+}
+
+impl Credentials for XMatrix {
+    const SCHEME: &'static str = "X-Matrix";
+
+    fn decode(value: &http::HeaderValue) -> Option<Self> {
+        debug_assert!(
+            value.as_bytes().starts_with(b"X-Matrix "),
+            "HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}",
+        );
+
+        let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
+            .ok()?
+            .trim_start();
+
+        let mut origin = None;
+        let mut key = None;
+        let mut sig = None;
+
+        for entry in parameters.split_terminator(',') {
+            let (name, value) = entry.split_once('=')?;
+
+            // It's not at all clear why some fields are quoted and others not in the spec,
+            // let's simply accept either form for every field.
+            let value = value
+                .strip_prefix('"')
+                .and_then(|rest| rest.strip_suffix('"'))
+                .unwrap_or(value);
+
+            // FIXME: Catch multiple fields of the same name
+            match name {
+                "origin" => origin = Some(value.try_into().ok()?),
+                "key" => key = Some(value.to_owned()),
+                "sig" => sig = Some(value.to_owned()),
+                _ => debug!(
+                    "Unexpected field `{}` in X-Matrix Authorization header",
+                    name
+                ),
+            }
+        }
+
+        Some(Self {
+            origin: origin?,
+            key: key?,
+            sig: sig?,
+        })
+    }
+
+    fn encode(&self) -> http::HeaderValue {
+        todo!()
+    }
+}
+
 impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
     fn into_response(self) -> Response {
         match self.0.try_into_http_response::<BytesMut>() {
-            Ok(res) => res.map(BytesMut::freeze).map(Body::from).into_response(),
+            Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
             Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
         }
     }
 }
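
Note: the `XMatrix` credential added above hand-parses Matrix's federation `Authorization` scheme. A minimal sketch of the same parsing idea, independent of axum (the sample header value is illustrative; quoted and unquoted values are accepted for every field, mirroring the decode impl in the diff):

fn parse_x_matrix(header: &str) -> Option<(String, String, String)> {
    let params = header.strip_prefix("X-Matrix ")?.trim_start();

    let (mut origin, mut key, mut sig) = (None, None, None);
    for entry in params.split_terminator(',') {
        let (name, value) = entry.split_once('=')?;
        // Accept both quoted and unquoted values.
        let value = value
            .strip_prefix('"')
            .and_then(|rest| rest.strip_suffix('"'))
            .unwrap_or(value);
        match name.trim() {
            "origin" => origin = Some(value.to_owned()),
            "key" => key = Some(value.to_owned()),
            "sig" => sig = Some(value.to_owned()),
            _ => {}
        }
    }
    Some((origin?, key?, sig?))
}

fn main() {
    let hdr = r#"X-Matrix origin=example.org,key="ed25519:1",sig="ABCdef""#;
    assert_eq!(
        parse_x_matrix(hdr).map(|(o, k, _)| (o, k)),
        Some(("example.org".to_owned(), "ed25519:1".to_owned()))
    );
}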
@@ -1,4 +1,4 @@
-use crate::{service::appservice::RegistrationInfo, Error};
+use crate::Error;
 use ruma::{
     api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName,
     OwnedUserId,

@@ -16,7 +16,7 @@ pub struct Ruma<T> {
     pub sender_servername: Option<OwnedServerName>,
     // This is None when body is not a valid string
     pub json_body: Option<CanonicalJsonValue>,
-    pub appservice_info: Option<RegistrationInfo>,
+    pub from_appservice: bool,
 }

 impl<T> Deref for Ruma<T> {
(File diff suppressed because it is too large.)

src/clap.rs (27 changed lines)
@@ -1,27 +0,0 @@
-//! Integration with `clap`
-
-use clap::Parser;
-
-/// Returns the current version of the crate with extra info if supplied
-///
-/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to
-/// include it in parenthesis after the SemVer version. A common value are git
-/// commit hashes.
-fn version() -> String {
-    let cargo_pkg_version = env!("CARGO_PKG_VERSION");
-
-    match option_env!("CONDUIT_VERSION_EXTRA") {
-        Some(x) => format!("{} ({})", cargo_pkg_version, x),
-        None => cargo_pkg_version.to_owned(),
-    }
-}
-
-/// Command line arguments
-#[derive(Parser)]
-#[clap(about, version = version())]
-pub struct Args {}
-
-/// Parse command line arguments into structured data
-pub fn parse() -> Args {
-    Args::parse()
-}
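
Note: the deleted module's version trick is just `option_env!`, which reads `CONDUIT_VERSION_EXTRA` at compile time rather than at runtime. A runnable sketch of the same pattern:

// Compile-time version string with optional extra info, as in the removed module.
fn version() -> String {
    let base = env!("CARGO_PKG_VERSION");
    match option_env!("CONDUIT_VERSION_EXTRA") {
        Some(extra) => format!("{} ({})", base, extra),
        None => base.to_owned(),
    }
}

fn main() {
    // Built with `CONDUIT_VERSION_EXTRA=abc123 cargo build`, this prints "x.y.z (abc123)".
    println!("{}", version());
}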
@@ -7,7 +7,6 @@ use std::{
 use ruma::{OwnedServerName, RoomVersionId};
 use serde::{de::IgnoredAny, Deserialize};
 use tracing::warn;
-use url::Url;

 mod proxy;

@@ -22,14 +21,13 @@ pub struct Config {
     pub tls: Option<TlsConfig>,

     pub server_name: OwnedServerName,
+    #[serde(default = "default_database_backend")]
     pub database_backend: String,
     pub database_path: String,
     #[serde(default = "default_db_cache_capacity_mb")]
     pub db_cache_capacity_mb: f64,
     #[serde(default = "true_fn")]
     pub enable_lightning_bolt: bool,
-    #[serde(default = "true_fn")]
-    pub allow_check_for_updates: bool,
     #[serde(default = "default_conduit_cache_capacity_modifier")]
     pub conduit_cache_capacity_modifier: f64,
     #[serde(default = "default_rocksdb_max_open_files")]

@@ -42,25 +40,22 @@ pub struct Config {
     pub max_request_size: u32,
     #[serde(default = "default_max_concurrent_requests")]
     pub max_concurrent_requests: u16,
-    #[serde(default = "default_max_fetch_prev_events")]
-    pub max_fetch_prev_events: u16,
     #[serde(default = "false_fn")]
     pub allow_registration: bool,
-    pub registration_token: Option<String>,
-    #[serde(default = "default_openid_token_ttl")]
-    pub openid_token_ttl: u64,
     #[serde(default = "true_fn")]
     pub allow_encryption: bool,
     #[serde(default = "false_fn")]
     pub allow_federation: bool,
     #[serde(default = "true_fn")]
+    pub allow_public_read_receipts: bool,
+    #[serde(default = "true_fn")]
+    pub allow_receiving_read_receipts: bool,
+    #[serde(default = "true_fn")]
     pub allow_room_creation: bool,
     #[serde(default = "true_fn")]
     pub allow_unstable_room_versions: bool,
     #[serde(default = "default_default_room_version")]
     pub default_room_version: RoomVersionId,
-    #[serde(default, flatten)]
-    pub well_known: WellKnownConfig,
     #[serde(default = "false_fn")]
     pub allow_jaeger: bool,
     #[serde(default = "false_fn")]

@@ -68,7 +63,7 @@ pub struct Config {
     #[serde(default)]
     pub proxy: ProxyConfig,
     pub jwt_secret: Option<String>,
-    #[serde(default = "default_trusted_servers")]
+    #[serde(default = "Vec::new")]
     pub trusted_servers: Vec<OwnedServerName>,
     #[serde(default = "default_log")]
     pub log: String,

@@ -95,14 +90,6 @@ pub struct TlsConfig {
     pub key: String,
 }

-#[derive(Clone, Debug, Deserialize, Default)]
-pub struct WellKnownConfig {
-    #[serde(rename = "well_known_client")]
-    pub client: Option<Url>,
-    #[serde(rename = "well_known_server")]
-    pub server: Option<OwnedServerName>,
-}
-
 const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];

 impl Config {

@@ -123,35 +110,9 @@ impl Config {
     }
 }

-impl Config {
-    pub fn well_known_client(&self) -> String {
-        if let Some(url) = &self.well_known.client {
-            url.to_string()
-        } else {
-            format!("https://{}", self.server_name)
-        }
-    }
-
-    pub fn well_known_server(&self) -> OwnedServerName {
-        match &self.well_known.server {
-            Some(server_name) => server_name.to_owned(),
-            None => {
-                if self.server_name.port().is_some() {
-                    self.server_name.to_owned()
-                } else {
-                    format!("{}:443", self.server_name.host())
-                        .try_into()
-                        .expect("Host from valid hostname + :443 must be valid")
-                }
-            }
-        }
-    }
-}
-
 impl fmt::Display for Config {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // Prepare a list of config values to show
-        let well_known_server = self.well_known_server();
         let lines = [
             ("Server name", self.server_name.host()),
             ("Database backend", &self.database_backend),

@@ -232,8 +193,6 @@ impl fmt::Display for Config {
                 }
                 &lst.join(", ")
             }),
-            ("Well-known server name", well_known_server.as_str()),
-            ("Well-known client URL", &self.well_known_client()),
         ];

         let mut msg: String = "Active config values:\n\n".to_owned();

@@ -262,8 +221,12 @@ fn default_port() -> u16 {
     8000
 }

+fn default_database_backend() -> String {
+    "sqlite".to_owned()
+}
+
 fn default_db_cache_capacity_mb() -> f64 {
-    300.0
+    10.0
 }

 fn default_conduit_cache_capacity_modifier() -> f64 {

@@ -271,7 +234,7 @@ fn default_conduit_cache_capacity_modifier() -> f64 {
 }

 fn default_rocksdb_max_open_files() -> i32 {
-    1000
+    20
 }

 fn default_pdu_cache_capacity() -> u32 {

@@ -290,27 +253,15 @@ fn default_max_concurrent_requests() -> u16 {
     100
 }

-fn default_max_fetch_prev_events() -> u16 {
-    100_u16
-}
-
-fn default_trusted_servers() -> Vec<OwnedServerName> {
-    vec![OwnedServerName::try_from("matrix.org").unwrap()]
-}
-
 fn default_log() -> String {
-    "warn,state_res=warn,_=off".to_owned()
+    "warn,state_res=warn,_=off,sled=off".to_owned()
 }

 fn default_turn_ttl() -> u64 {
     60 * 60 * 24
 }

-fn default_openid_token_ttl() -> u64 {
-    60 * 60
-}
-
 // I know, it's a great name
 pub fn default_default_room_version() -> RoomVersionId {
-    RoomVersionId::V10
+    RoomVersionId::V9
 }

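
Note: every optional config key above is backed by a `#[serde(default = "...")]` function. A minimal sketch of how that behaves during deserialization (assuming the serde and toml crates; the field names are illustrative, not Conduit's full Config):

use serde::Deserialize;

fn default_backend() -> String {
    "sqlite".to_owned()
}

#[derive(Debug, Deserialize)]
struct MiniConfig {
    server_name: String,
    #[serde(default = "default_backend")]
    database_backend: String,
}

fn main() {
    // database_backend is absent, so serde calls default_backend().
    let cfg: MiniConfig = toml::from_str(r#"server_name = "example.org""#).unwrap();
    assert_eq!(cfg.database_backend, "sqlite");
}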
@@ -29,9 +29,7 @@ use crate::Result;
 /// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
 #[derive(Clone, Debug, Deserialize)]
 #[serde(rename_all = "snake_case")]
-#[derive(Default)]
 pub enum ProxyConfig {
-    #[default]
     None,
     Global {
         #[serde(deserialize_with = "crate::utils::deserialize_from_str")]

@@ -50,6 +48,11 @@ impl ProxyConfig {
         })
     }
 }
+impl Default for ProxyConfig {
+    fn default() -> Self {
+        ProxyConfig::None
+    }
+}

 #[derive(Clone, Debug, Deserialize)]
 pub struct PartialProxyConfig {
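
Note: the two forms are equivalent — since Rust 1.62, `#[derive(Default)]` on an enum with a `#[default]` variant generates exactly the manual impl the other side spells out. A sketch of both:

// Derived form (Rust 1.62+):
#[derive(Debug, Default)]
#[allow(dead_code)]
enum Mode {
    #[default]
    None,
    Global,
}

// Hand-written equivalent of what the derive expands to:
#[derive(Debug)]
#[allow(dead_code)]
enum ModeManual {
    None,
    Global,
}

impl Default for ModeManual {
    fn default() -> Self {
        ModeManual::None
    }
}

fn main() {
    assert!(matches!(Mode::default(), Mode::None));
    assert!(matches!(ModeManual::default(), ModeManual::None));
}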
@@ -116,7 +116,7 @@ impl KvTree for PersyTree {
             match iter {
                 Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
                     v.into_iter()
-                        .map(|val| ((*k).to_owned(), (*val).to_owned()))
+                        .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
                         .next()
                 })),
                 Err(e) => {

@@ -142,7 +142,7 @@ impl KvTree for PersyTree {
                 Ok(iter) => {
                     let map = iter.filter_map(|(k, v)| {
                         v.into_iter()
-                            .map(|val| ((*k).to_owned(), (*val).to_owned()))
+                            .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
                             .next()
                     });
                     if backwards {

@@ -179,7 +179,7 @@ impl KvTree for PersyTree {
                 iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
                     .filter_map(|(k, v)| {
                         v.into_iter()
-                            .map(|val| ((*k).to_owned(), (*val).to_owned()))
+                            .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
                             .next()
                     }),
             )
@@ -23,35 +23,30 @@ pub struct RocksDbEngineTree<'a> {
 fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options {
     let mut block_based_options = rocksdb::BlockBasedOptions::default();
     block_based_options.set_block_cache(rocksdb_cache);
-    block_based_options.set_bloom_filter(10.0, false);
+
+    // "Difference of spinning disk"
+    // https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html
     block_based_options.set_block_size(4 * 1024);
     block_based_options.set_cache_index_and_filter_blocks(true);
-    block_based_options.set_pin_l0_filter_and_index_blocks_in_cache(true);
-    block_based_options.set_optimize_filters_for_memory(true);

     let mut db_opts = rocksdb::Options::default();
     db_opts.set_block_based_table_factory(&block_based_options);
+    db_opts.set_optimize_filters_for_hits(true);
+    db_opts.set_skip_stats_update_on_db_open(true);
+    db_opts.set_level_compaction_dynamic_level_bytes(true);
+    db_opts.set_target_file_size_base(256 * 1024 * 1024);
+    //db_opts.set_compaction_readahead_size(2 * 1024 * 1024);
+    //db_opts.set_use_direct_reads(true);
+    //db_opts.set_use_direct_io_for_flush_and_compaction(true);
     db_opts.create_if_missing(true);
     db_opts.increase_parallelism(num_cpus::get() as i32);
     db_opts.set_max_open_files(max_open_files);
-    db_opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
-    db_opts.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd);
+    db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd);
     db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
+    db_opts.optimize_level_style_compaction(10 * 1024 * 1024);

-    // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
-    db_opts.set_level_compaction_dynamic_level_bytes(true);
-    db_opts.set_max_background_jobs(6);
-    db_opts.set_bytes_per_sync(1048576);
-
-    // https://github.com/facebook/rocksdb/issues/849
-    db_opts.set_keep_log_file_num(100);
-
-    // https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords
-    //
-    // Unclean shutdowns of a Matrix homeserver are likely to be fine when
-    // recovered in this manner as it's likely any lost information will be
-    // restored via federation.
-    db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords);
+    let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
+    db_opts.set_prefix_extractor(prefix_extractor);

     db_opts
 }

@@ -59,7 +54,7 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
 impl KeyValueDatabaseEngine for Arc<Engine> {
     fn open(config: &Config) -> Result<Self> {
         let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize;
-        let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes);
+        let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap();

         let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache);

@@ -136,17 +131,12 @@ impl RocksDbEngineTree<'_> {

 impl KvTree for RocksDbEngineTree<'_> {
     fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
-        let readoptions = rocksdb::ReadOptions::default();
-
-        Ok(self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?)
+        Ok(self.db.rocks.get_cf(&self.cf(), key)?)
     }

     fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
-        let writeoptions = rocksdb::WriteOptions::default();
         let lock = self.write_lock.read().unwrap();
-        self.db
-            .rocks
-            .put_cf_opt(&self.cf(), key, value, &writeoptions)?;
+        self.db.rocks.put_cf(&self.cf(), key, value)?;
         drop(lock);

         self.watchers.wake(key);

@@ -155,32 +145,23 @@ impl KvTree for RocksDbEngineTree<'_> {
     }

     fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
-        let writeoptions = rocksdb::WriteOptions::default();
         for (key, value) in iter {
-            self.db
-                .rocks
-                .put_cf_opt(&self.cf(), key, value, &writeoptions)?;
+            self.db.rocks.put_cf(&self.cf(), key, value)?;
         }

         Ok(())
     }

     fn remove(&self, key: &[u8]) -> Result<()> {
-        let writeoptions = rocksdb::WriteOptions::default();
-        Ok(self
-            .db
-            .rocks
-            .delete_cf_opt(&self.cf(), key, &writeoptions)?)
+        Ok(self.db.rocks.delete_cf(&self.cf(), key)?)
     }

     fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
-        let readoptions = rocksdb::ReadOptions::default();
-
         Box::new(
             self.db
                 .rocks
-                .iterator_cf_opt(&self.cf(), readoptions, rocksdb::IteratorMode::Start)
-                .map(|r| r.unwrap())
+                .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start)
+                //.map(|r| r.unwrap())
                 .map(|(k, v)| (Vec::from(k), Vec::from(v))),
         )
     }

@@ -190,14 +171,11 @@ impl KvTree for RocksDbEngineTree<'_> {
         from: &[u8],
         backwards: bool,
     ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
-        let readoptions = rocksdb::ReadOptions::default();
-
         Box::new(
             self.db
                 .rocks
-                .iterator_cf_opt(
+                .iterator_cf(
                     &self.cf(),
-                    readoptions,
                     rocksdb::IteratorMode::From(
                         from,
                         if backwards {

@@ -207,39 +185,29 @@ impl KvTree for RocksDbEngineTree<'_> {
                     },
                 ),
             )
-            .map(|r| r.unwrap())
+            //.map(|r| r.unwrap())
             .map(|(k, v)| (Vec::from(k), Vec::from(v))),
         )
     }

     fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
-        let readoptions = rocksdb::ReadOptions::default();
-        let writeoptions = rocksdb::WriteOptions::default();
-
         let lock = self.write_lock.write().unwrap();

-        let old = self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?;
+        let old = self.db.rocks.get_cf(&self.cf(), key)?;
         let new = utils::increment(old.as_deref()).unwrap();
-        self.db
-            .rocks
-            .put_cf_opt(&self.cf(), key, &new, &writeoptions)?;
+        self.db.rocks.put_cf(&self.cf(), key, &new)?;

         drop(lock);
         Ok(new)
     }

     fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
-        let readoptions = rocksdb::ReadOptions::default();
-        let writeoptions = rocksdb::WriteOptions::default();
-
         let lock = self.write_lock.write().unwrap();

         for key in iter {
-            let old = self.db.rocks.get_cf_opt(&self.cf(), &key, &readoptions)?;
+            let old = self.db.rocks.get_cf(&self.cf(), &key)?;
             let new = utils::increment(old.as_deref()).unwrap();
-            self.db
-                .rocks
-                .put_cf_opt(&self.cf(), key, new, &writeoptions)?;
+            self.db.rocks.put_cf(&self.cf(), key, new)?;
         }

         drop(lock);

@@ -251,17 +219,14 @@ impl KvTree for RocksDbEngineTree<'_> {
         &'a self,
         prefix: Vec<u8>,
     ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
-        let readoptions = rocksdb::ReadOptions::default();
-
         Box::new(
             self.db
                 .rocks
-                .iterator_cf_opt(
+                .iterator_cf(
                     &self.cf(),
-                    readoptions,
                     rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
                 )
-                .map(|r| r.unwrap())
+                //.map(|r| r.unwrap())
                 .map(|(k, v)| (Vec::from(k), Vec::from(v)))
                 .take_while(move |(k, _)| k.starts_with(&prefix)),
         )
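
Note: the real divergence in this file is the tuning profile — one side pins filter/index blocks and tolerates corrupted WAL tails, the other leans on a fixed-prefix extractor and level-style compaction. A minimal sketch of opening a DB with a handful of these options (assuming the rocksdb crate; the path and values are placeholders, not Conduit's defaults):

use rocksdb::{DBCompressionType, Options, SliceTransform, DB};

fn open_tuned(path: &str) -> Result<DB, rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.set_max_open_files(512); // placeholder; Conduit takes this from config
    opts.set_compression_type(DBCompressionType::Zstd);
    opts.set_level_compaction_dynamic_level_bytes(true);
    // Fixed-prefix extractor, as on one side of this diff:
    opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(1));
    DB::open(&opts, path)
}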
@@ -13,8 +13,8 @@ use thread_local::ThreadLocal;
 use tracing::debug;

 thread_local! {
-    static READ_CONNECTION: RefCell<Option<&'static Connection>> = const { RefCell::new(None) };
-    static READ_CONNECTION_ITERATOR: RefCell<Option<&'static Connection>> = const { RefCell::new(None) };
+    static READ_CONNECTION: RefCell<Option<&'static Connection>> = RefCell::new(None);
+    static READ_CONNECTION_ITERATOR: RefCell<Option<&'static Connection>> = RefCell::new(None);
 }

 struct PreparedStatementIterator<'a> {

@@ -33,7 +33,7 @@ impl Iterator for PreparedStatementIterator<'_> {
 struct NonAliasingBox<T>(*mut T);
 impl<T> Drop for NonAliasingBox<T> {
     fn drop(&mut self) {
-        drop(unsafe { Box::from_raw(self.0) });
+        unsafe { Box::from_raw(self.0) };
     }
 }

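
Note: the `const { ... }` initializer in `thread_local!` (stable since Rust 1.59) lets the value be placed without the lazy-initialization check the plain form needs; the two sides are otherwise identical. A runnable sketch:

use std::cell::RefCell;

thread_local! {
    // Const-initialized: no runtime lazy-init check on first access.
    static COUNTER: RefCell<u32> = const { RefCell::new(0) };
}

fn main() {
    COUNTER.with(|c| *c.borrow_mut() += 1);
    COUNTER.with(|c| assert_eq!(*c.borrow(), 1));
}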
@@ -8,7 +8,6 @@ use tokio::sync::watch;

 #[derive(Default)]
 pub(super) struct Watchers {
-    #[allow(clippy::type_complexity)]
     watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
 }

@@ -20,7 +19,7 @@ impl Watchers {
         let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) {
             hash_map::Entry::Occupied(o) => o.get().1.clone(),
             hash_map::Entry::Vacant(v) => {
-                let (tx, rx) = watch::channel(());
+                let (tx, rx) = tokio::sync::watch::channel(());
                 v.insert((tx, rx.clone()));
                 rx
             }
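
Note: the watcher registry hands every caller a clone of one `tokio::sync::watch` receiver per key prefix, and waking is a single `send`. A minimal sketch of that primitive (assuming the tokio crate):

use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(());

    tokio::spawn(async move {
        // Wake all receivers, as Watchers::wake does for a key prefix.
        let _ = tx.send(());
    });

    // changed() resolves once a new value is observed.
    rx.changed().await.unwrap();
}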
@@ -123,12 +123,13 @@ impl service::account_data::Data for KeyValueDatabase {
             .take_while(move |(k, _)| k.starts_with(&prefix))
             .map(|(k, v)| {
                 Ok::<_, Error>((
-                    RoomAccountDataEventType::from(
+                    RoomAccountDataEventType::try_from(
                         utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
                             || Error::bad_database("RoomUserData ID in db is invalid."),
                         )?)
                         .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
-                    ),
+                    )
+                    .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
                     serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
                         Error::bad_database("Database contains invalid account data.")
                     })?,
@@ -1,15 +1,18 @@
-use ruma::api::appservice::Registration;
-
 use crate::{database::KeyValueDatabase, service, utils, Error, Result};

 impl service::appservice::Data for KeyValueDatabase {
     /// Registers an appservice and returns the ID to the caller
-    fn register_appservice(&self, yaml: Registration) -> Result<String> {
-        let id = yaml.id.as_str();
+    fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
+        // TODO: Rumaify
+        let id = yaml.get("id").unwrap().as_str().unwrap();
         self.id_appserviceregistrations.insert(
             id.as_bytes(),
             serde_yaml::to_string(&yaml).unwrap().as_bytes(),
         )?;
+        self.cached_registrations
+            .write()
+            .unwrap()
+            .insert(id.to_owned(), yaml.to_owned());

         Ok(id.to_owned())
     }

@@ -22,18 +25,33 @@ impl service::appservice::Data for KeyValueDatabase {
     fn unregister_appservice(&self, service_name: &str) -> Result<()> {
         self.id_appserviceregistrations
             .remove(service_name.as_bytes())?;
+        self.cached_registrations
+            .write()
+            .unwrap()
+            .remove(service_name);
         Ok(())
     }

-    fn get_registration(&self, id: &str) -> Result<Option<Registration>> {
-        self.id_appserviceregistrations
-            .get(id.as_bytes())?
-            .map(|bytes| {
-                serde_yaml::from_slice(&bytes).map_err(|_| {
-                    Error::bad_database("Invalid registration bytes in id_appserviceregistrations.")
-                })
-            })
-            .transpose()
+    fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
+        self.cached_registrations
+            .read()
+            .unwrap()
+            .get(id)
+            .map_or_else(
+                || {
+                    self.id_appserviceregistrations
+                        .get(id.as_bytes())?
+                        .map(|bytes| {
+                            serde_yaml::from_slice(&bytes).map_err(|_| {
+                                Error::bad_database(
+                                    "Invalid registration bytes in id_appserviceregistrations.",
+                                )
+                            })
+                        })
+                        .transpose()
+                },
+                |r| Ok(Some(r.clone())),
+            )
     }

     fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>> {

@@ -46,7 +64,7 @@ impl service::appservice::Data for KeyValueDatabase {
         )))
     }

-    fn all(&self) -> Result<Vec<(String, Registration)>> {
+    fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
         self.iter_ids()?
             .filter_map(|id| id.ok())
             .map(move |id| {
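
Note: the cached path above is a read-through cache over the persistent tree — consult an in-memory `RwLock<HashMap>`, fall back to storage on a miss. A simplified sketch of the same shape (types and the storage closure are illustrative; unlike the diff, this version also populates the cache on a miss):

use std::collections::HashMap;
use std::sync::RwLock;

struct Registry {
    cache: RwLock<HashMap<String, String>>,
}

impl Registry {
    fn get(&self, id: &str, load: impl Fn(&str) -> Option<String>) -> Option<String> {
        if let Some(hit) = self.cache.read().unwrap().get(id) {
            return Some(hit.clone());
        }
        // Miss: load from the backing store and populate the cache.
        let value = load(id)?;
        self.cache
            .write()
            .unwrap()
            .insert(id.to_owned(), value.clone());
        Some(value)
    }
}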
@ -1,22 +1,16 @@
|
||||||
use std::collections::HashMap;
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use futures_util::{stream::FuturesUnordered, StreamExt};
|
use futures_util::{stream::FuturesUnordered, StreamExt};
|
||||||
use lru_cache::LruCache;
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::federation::discovery::{OldVerifyKey, ServerSigningKeys},
|
api::federation::discovery::{ServerSigningKeys, VerifyKey},
|
||||||
signatures::Ed25519KeyPair,
|
signatures::Ed25519KeyPair,
|
||||||
DeviceId, ServerName, UserId,
|
DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||||
database::KeyValueDatabase,
|
|
||||||
service::{self, globals::SigningKeys},
|
|
||||||
services, utils, Error, Result,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub const COUNTER: &[u8] = b"c";
|
pub const COUNTER: &[u8] = b"c";
|
||||||
pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";
|
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl service::globals::Data for KeyValueDatabase {
|
impl service::globals::Data for KeyValueDatabase {
|
||||||
|
@ -32,23 +26,6 @@ impl service::globals::Data for KeyValueDatabase {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn last_check_for_updates_id(&self) -> Result<u64> {
|
|
||||||
self.global
|
|
||||||
.get(LAST_CHECK_FOR_UPDATES_COUNT)?
|
|
||||||
.map_or(Ok(0_u64), |bytes| {
|
|
||||||
utils::u64_from_bytes(&bytes).map_err(|_| {
|
|
||||||
Error::bad_database("last check for updates count has invalid bytes.")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn update_check_for_updates_id(&self, id: u64) -> Result<()> {
|
|
||||||
self.global
|
|
||||||
.insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes())?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
|
async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
|
||||||
let userid_bytes = user_id.as_bytes().to_vec();
|
let userid_bytes = user_id.as_bytes().to_vec();
|
||||||
let mut userid_prefix = userid_bytes.clone();
|
let mut userid_prefix = userid_bytes.clone();
|
||||||
|
@ -98,9 +75,7 @@ impl service::globals::Data for KeyValueDatabase {
|
||||||
futures.push(self.pduid_pdu.watch_prefix(&short_roomid));
|
futures.push(self.pduid_pdu.watch_prefix(&short_roomid));
|
||||||
|
|
||||||
// EDUs
|
// EDUs
|
||||||
futures.push(Box::into_pin(Box::new(async move {
|
futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes));
|
||||||
let _result = services().rooms.edus.typing.wait_for_update(&room_id).await;
|
|
||||||
})));
|
|
||||||
|
|
||||||
futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));
|
futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));
|
||||||
|
|
||||||
|
@@ -143,67 +118,8 @@ impl service::globals::Data for KeyValueDatabase {
         self._db.cleanup()
     }
 
-    fn memory_usage(&self) -> String {
-        let pdu_cache = self.pdu_cache.lock().unwrap().len();
-        let shorteventid_cache = self.shorteventid_cache.lock().unwrap().len();
-        let auth_chain_cache = self.auth_chain_cache.lock().unwrap().len();
-        let eventidshort_cache = self.eventidshort_cache.lock().unwrap().len();
-        let statekeyshort_cache = self.statekeyshort_cache.lock().unwrap().len();
-        let our_real_users_cache = self.our_real_users_cache.read().unwrap().len();
-        let appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().len();
-        let lasttimelinecount_cache = self.lasttimelinecount_cache.lock().unwrap().len();
-
-        let mut response = format!(
-            "\
-pdu_cache: {pdu_cache}
-shorteventid_cache: {shorteventid_cache}
-auth_chain_cache: {auth_chain_cache}
-eventidshort_cache: {eventidshort_cache}
-statekeyshort_cache: {statekeyshort_cache}
-our_real_users_cache: {our_real_users_cache}
-appservice_in_room_cache: {appservice_in_room_cache}
-lasttimelinecount_cache: {lasttimelinecount_cache}\n"
-        );
-        if let Ok(db_stats) = self._db.memory_usage() {
-            response += &db_stats;
-        }
-
-        response
-    }
-
-    fn clear_caches(&self, amount: u32) {
-        if amount > 0 {
-            let c = &mut *self.pdu_cache.lock().unwrap();
-            *c = LruCache::new(c.capacity());
-        }
-        if amount > 1 {
-            let c = &mut *self.shorteventid_cache.lock().unwrap();
-            *c = LruCache::new(c.capacity());
-        }
-        if amount > 2 {
-            let c = &mut *self.auth_chain_cache.lock().unwrap();
-            *c = LruCache::new(c.capacity());
-        }
-        if amount > 3 {
-            let c = &mut *self.eventidshort_cache.lock().unwrap();
-            *c = LruCache::new(c.capacity());
-        }
-        if amount > 4 {
-            let c = &mut *self.statekeyshort_cache.lock().unwrap();
-            *c = LruCache::new(c.capacity());
-        }
-        if amount > 5 {
-            let c = &mut *self.our_real_users_cache.write().unwrap();
-            *c = HashMap::new();
-        }
-        if amount > 6 {
-            let c = &mut *self.appservice_in_room_cache.write().unwrap();
-            *c = HashMap::new();
-        }
-        if amount > 7 {
-            let c = &mut *self.lasttimelinecount_cache.lock().unwrap();
-            *c = HashMap::new();
-        }
-    }
+    fn memory_usage(&self) -> Result<String> {
+        self._db.memory_usage()
+    }
 
     fn load_keypair(&self) -> Result<Ed25519KeyPair> {
@@ -241,97 +157,64 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
         self.global.remove(b"keypair")
     }
 
-    fn add_signing_key_from_trusted_server(
+    fn add_signing_key(
         &self,
         origin: &ServerName,
         new_keys: ServerSigningKeys,
-    ) -> Result<SigningKeys> {
-        let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
-
-        Ok(
-            if let Some(mut prev_keys) =
-                prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
-            {
-                let ServerSigningKeys {
-                    verify_keys,
-                    old_verify_keys,
-                    ..
-                } = new_keys;
-
-                prev_keys.verify_keys.extend(verify_keys);
-                prev_keys.old_verify_keys.extend(old_verify_keys);
-                prev_keys.valid_until_ts = new_keys.valid_until_ts;
-
-                self.server_signingkeys.insert(
-                    origin.as_bytes(),
-                    &serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
-                )?;
-
-                prev_keys.into()
-            } else {
-                self.server_signingkeys.insert(
-                    origin.as_bytes(),
-                    &serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
-                )?;
-
-                new_keys.into()
-            },
-        )
-    }
-
-    fn add_signing_key_from_origin(
-        &self,
-        origin: &ServerName,
-        new_keys: ServerSigningKeys,
-    ) -> Result<SigningKeys> {
-        let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
-
-        Ok(
-            if let Some(mut prev_keys) =
-                prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
-            {
-                let ServerSigningKeys {
-                    verify_keys,
-                    old_verify_keys,
-                    ..
-                } = new_keys;
-
-                // Moving `verify_keys` no longer present to `old_verify_keys`
-                for (key_id, key) in prev_keys.verify_keys {
-                    if !verify_keys.contains_key(&key_id) {
-                        prev_keys
-                            .old_verify_keys
-                            .insert(key_id, OldVerifyKey::new(prev_keys.valid_until_ts, key.key));
-                    }
-                }
-
-                prev_keys.verify_keys = verify_keys;
-                prev_keys.old_verify_keys.extend(old_verify_keys);
-                prev_keys.valid_until_ts = new_keys.valid_until_ts;
-
-                self.server_signingkeys.insert(
-                    origin.as_bytes(),
-                    &serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
-                )?;
-
-                prev_keys.into()
-            } else {
-                self.server_signingkeys.insert(
-                    origin.as_bytes(),
-                    &serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
-                )?;
-
-                new_keys.into()
-            },
-        )
+    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
+        // Not atomic, but this is not critical
+        let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
+
+        let mut keys = signingkeys
+            .and_then(|keys| serde_json::from_slice(&keys).ok())
+            .unwrap_or_else(|| {
+                // Just insert "now", it doesn't matter
+                ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
+            });
+
+        let ServerSigningKeys {
+            verify_keys,
+            old_verify_keys,
+            ..
+        } = new_keys;
+
+        keys.verify_keys.extend(verify_keys.into_iter());
+        keys.old_verify_keys.extend(old_verify_keys.into_iter());
+
+        self.server_signingkeys.insert(
+            origin.as_bytes(),
+            &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
+        )?;
+
+        let mut tree = keys.verify_keys;
+        tree.extend(
+            keys.old_verify_keys
+                .into_iter()
+                .map(|old| (old.0, VerifyKey::new(old.1.key))),
+        );
+
+        Ok(tree)
     }
 
     /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
-    fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> {
+    fn signing_keys_for(
+        &self,
+        origin: &ServerName,
+    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
         let signingkeys = self
             .server_signingkeys
             .get(origin.as_bytes())?
-            .and_then(|bytes| serde_json::from_slice::<SigningKeys>(&bytes).ok());
+            .and_then(|bytes| serde_json::from_slice(&bytes).ok())
+            .map(|keys: ServerSigningKeys| {
+                let mut tree = keys.verify_keys;
+                tree.extend(
+                    keys.old_verify_keys
+                        .into_iter()
+                        .map(|old| (old.0, VerifyKey::new(old.1.key))),
+                );
+                tree
+            })
+            .unwrap_or_else(BTreeMap::new);
 
         Ok(signingkeys)
     }
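Note on this hunk (not part of the diff): both `add_signing_key` and `signing_keys_for` on this branch flatten the stored `ServerSigningKeys` into a single map, demoting expired `old_verify_keys` entries to plain `VerifyKey`s so old signatures can still be checked. A minimal standalone sketch of that flattening, using the same ruma types as above; the helper name is ours, not the repo's:

use std::collections::BTreeMap;

use ruma::{
    api::federation::discovery::{ServerSigningKeys, VerifyKey},
    OwnedServerSigningKeyId,
};

// Illustrative helper: the same flattening the hunk performs inline.
fn flatten(keys: ServerSigningKeys) -> BTreeMap<OwnedServerSigningKeyId, VerifyKey> {
    // Currently valid keys are taken as-is...
    let mut tree = keys.verify_keys;
    // ...expired keys drop their `expired_ts` and become plain verify keys.
    tree.extend(
        keys.old_verify_keys
            .into_iter()
            .map(|(id, old)| (id, VerifyKey::new(old.key))),
    );
    tree
}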
@@ -1,4 +1,4 @@
-use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition};
+use ruma::api::client::error::ErrorKind;
 
 use crate::{database::KeyValueDatabase, service, utils, Error, Result};
 
@@ -8,7 +8,7 @@ impl service::media::Data for KeyValueDatabase {
         mxc: String,
         width: u32,
         height: u32,
-        content_disposition: &ContentDisposition,
+        content_disposition: Option<&str>,
         content_type: Option<&str>,
     ) -> Result<Vec<u8>> {
         let mut key = mxc.as_bytes().to_vec();
@@ -16,7 +16,12 @@ impl service::media::Data for KeyValueDatabase {
         key.extend_from_slice(&width.to_be_bytes());
         key.extend_from_slice(&height.to_be_bytes());
         key.push(0xff);
-        key.extend_from_slice(content_disposition.to_string().as_bytes());
+        key.extend_from_slice(
+            content_disposition
+                .as_ref()
+                .map(|f| f.as_bytes())
+                .unwrap_or_default(),
+        );
         key.push(0xff);
         key.extend_from_slice(
             content_type
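For context (not part of the diff): the media key this hunk builds is a flat byte string with 0xff separators, which is unambiguous because the byte 0xff can never occur inside valid UTF-8 text. A sketch of the layout with a hypothetical helper name:

// mxc ++ 0xff ++ width_be ++ height_be ++ 0xff ++ content_disposition ++ 0xff ++ content_type
fn thumbnail_key(
    mxc: &str,
    width: u32,
    height: u32,
    content_disposition: Option<&str>,
    content_type: Option<&str>,
) -> Vec<u8> {
    let mut key = mxc.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&width.to_be_bytes());
    key.extend_from_slice(&height.to_be_bytes());
    key.push(0xff);
    // `None` is stored as an empty segment; the search hunk below maps an
    // empty segment back to `None` via `is_empty()`.
    key.extend_from_slice(content_disposition.unwrap_or_default().as_bytes());
    key.push(0xff);
    key.extend_from_slice(content_type.unwrap_or_default().as_bytes());
    key
}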
@@ -35,7 +40,7 @@ impl service::media::Data for KeyValueDatabase {
         mxc: String,
         width: u32,
         height: u32,
-    ) -> Result<(ContentDisposition, Option<String>, Vec<u8>)> {
+    ) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
         let mut prefix = mxc.as_bytes().to_vec();
         prefix.push(0xff);
         prefix.extend_from_slice(&width.to_be_bytes());
@@ -63,9 +68,15 @@ impl service::media::Data for KeyValueDatabase {
             .next()
             .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
 
-        let content_disposition = content_disposition_bytes.try_into().unwrap_or_else(|_| {
-            ContentDisposition::new(ruma::http_headers::ContentDispositionType::Inline)
-        });
+        let content_disposition = if content_disposition_bytes.is_empty() {
+            None
+        } else {
+            Some(
+                utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
+                    Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
+                })?,
+            )
+        };
         Ok((content_disposition, content_type, key))
     }
 }
@@ -1,15 +1,9 @@
-use ruma::{
-    api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId,
-    UserId,
-};
+use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
 
 use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
 
 impl service::rooms::alias::Data for KeyValueDatabase {
-    fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> {
-        // Comes first as we don't want a stuck alias
-        self.alias_userid
-            .insert(alias.alias().as_bytes(), user_id.as_bytes())?;
+    fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
         self.alias_roomid
             .insert(alias.alias().as_bytes(), room_id.as_bytes())?;
         let mut aliasid = room_id.as_bytes().to_vec();
@@ -28,13 +22,13 @@ impl service::rooms::alias::Data for KeyValueDatabase {
                 self.aliasid_alias.remove(&key)?;
             }
             self.alias_roomid.remove(alias.alias().as_bytes())?;
-            self.alias_userid.remove(alias.alias().as_bytes())
         } else {
-            Err(Error::BadRequest(
+            return Err(Error::BadRequest(
                 ErrorKind::NotFound,
                 "Alias does not exist.",
-            ))
+            ));
         }
+        Ok(())
     }
 
     fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
@@ -63,16 +57,4 @@ impl service::rooms::alias::Data for KeyValueDatabase {
                 .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
         }))
     }
-
-    fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>> {
-        self.alias_userid
-            .get(alias.alias().as_bytes())?
-            .map(|bytes| {
-                UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
-                    Error::bad_database("User ID in alias_userid is invalid unicode.")
-                })?)
-                .map_err(|_| Error::bad_database("User ID in alias_roomid is invalid."))
-            })
-            .transpose()
-    }
 }
@@ -1,5 +1,6 @@
 mod presence;
 mod read_receipt;
+mod typing;
 
 use crate::{database::KeyValueDatabase, service};
 
@@ -105,16 +105,25 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase {
         )
     }
 
-    fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> {
+    fn private_read_set(
+        &self,
+        room_id: &RoomId,
+        user_id: &UserId,
+        shorteventid: u64,
+    ) -> Result<()> {
         let mut key = room_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(user_id.as_bytes());
 
-        self.roomuserid_privateread
-            .insert(&key, &count.to_be_bytes())?;
+        if self.private_read_get(room_id, user_id)?.unwrap_or(0) < shorteventid {
+            self.roomuserid_privateread
+                .insert(&key, &shorteventid.to_be_bytes())?;
 
-        self.roomuserid_lastprivatereadupdate
-            .insert(&key, &services().globals.next_count()?.to_be_bytes())
+            self.roomuserid_lastprivatereadupdate
+                .insert(&key, &services().globals.next_count()?.to_be_bytes())
+        } else {
+            Ok(())
+        }
     }
 
     fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
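Aside (not in the diff): the new guard makes the stored private read marker monotonic, so a client replaying an older receipt can no longer move it backwards. The comparison boils down to this sketch:

// Only advance the marker; anything at or below the current value is ignored.
fn should_store(current: Option<u64>, incoming: u64) -> bool {
    current.unwrap_or(0) < incoming
}

// should_store(None, 5)    == true   (first receipt is always stored)
// should_store(Some(3), 5) == true   (newer event advances the marker)
// should_store(Some(5), 5) == false  (replays and stale receipts are dropped)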
src/database/key_value/rooms/edus/typing.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
+use std::{collections::HashSet, mem};
+
+use ruma::{OwnedUserId, RoomId, UserId};
+
+use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
+
+impl service::rooms::edus::typing::Data for KeyValueDatabase {
+    fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
+        let mut prefix = room_id.as_bytes().to_vec();
+        prefix.push(0xff);
+
+        let count = services().globals.next_count()?.to_be_bytes();
+
+        let mut room_typing_id = prefix;
+        room_typing_id.extend_from_slice(&timeout.to_be_bytes());
+        room_typing_id.push(0xff);
+        room_typing_id.extend_from_slice(&count);
+
+        self.typingid_userid
+            .insert(&room_typing_id, user_id.as_bytes())?;
+
+        self.roomid_lasttypingupdate
+            .insert(room_id.as_bytes(), &count)?;
+
+        Ok(())
+    }
+
+    fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
+        let mut prefix = room_id.as_bytes().to_vec();
+        prefix.push(0xff);
+
+        let user_id = user_id.to_string();
+
+        let mut found_outdated = false;
+
+        // Maybe there are multiple ones from calling roomtyping_add multiple times
+        for outdated_edu in self
+            .typingid_userid
+            .scan_prefix(prefix)
+            .filter(|(_, v)| &**v == user_id.as_bytes())
+        {
+            self.typingid_userid.remove(&outdated_edu.0)?;
+            found_outdated = true;
+        }
+
+        if found_outdated {
+            self.roomid_lasttypingupdate.insert(
+                room_id.as_bytes(),
+                &services().globals.next_count()?.to_be_bytes(),
+            )?;
+        }
+
+        Ok(())
+    }
+
+    fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
+        let mut prefix = room_id.as_bytes().to_vec();
+        prefix.push(0xff);
+
+        let current_timestamp = utils::millis_since_unix_epoch();
+
+        let mut found_outdated = false;
+
+        // Find all outdated edus before inserting a new one
+        for outdated_edu in self
+            .typingid_userid
+            .scan_prefix(prefix)
+            .map(|(key, _)| {
+                Ok::<_, Error>((
+                    key.clone(),
+                    utils::u64_from_bytes(
+                        &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| {
+                            Error::bad_database("RoomTyping has invalid timestamp or delimiters.")
+                        })?[0..mem::size_of::<u64>()],
+                    )
+                    .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?,
+                ))
+            })
+            .filter_map(|r| r.ok())
+            .take_while(|&(_, timestamp)| timestamp < current_timestamp)
+        {
+            // This is an outdated edu (time > timestamp)
+            self.typingid_userid.remove(&outdated_edu.0)?;
+            found_outdated = true;
+        }
+
+        if found_outdated {
+            self.roomid_lasttypingupdate.insert(
+                room_id.as_bytes(),
+                &services().globals.next_count()?.to_be_bytes(),
+            )?;
+        }
+
+        Ok(())
+    }
+
+    fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
+        Ok(self
+            .roomid_lasttypingupdate
+            .get(room_id.as_bytes())?
+            .map(|bytes| {
+                utils::u64_from_bytes(&bytes).map_err(|_| {
+                    Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
+                })
+            })
+            .transpose()?
+            .unwrap_or(0))
+    }
+
+    fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>> {
+        let mut prefix = room_id.as_bytes().to_vec();
+        prefix.push(0xff);
+
+        let mut user_ids = HashSet::new();
+
+        for (_, user_id) in self.typingid_userid.scan_prefix(prefix) {
+            let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| {
+                Error::bad_database("User ID in typingid_userid is invalid unicode.")
+            })?)
+            .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?;
+
+            user_ids.insert(user_id);
+        }
+
+        Ok(user_ids)
+    }
+}
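A note on the key layout of this new file (not part of the diff): `typingid_userid` keys are `roomid ++ 0xff ++ timeout_be ++ 0xff ++ count`, which is why `typings_maintain` can pull the 8-byte big-endian timeout straight out of the segment after the first 0xff. A standalone sketch of that parse, with a hypothetical helper name:

fn timeout_from_typing_key(key: &[u8]) -> Option<u64> {
    // Skip the room ID: everything up to and including the first 0xff.
    let after_room = key.splitn(2, |&b| b == 0xff).nth(1)?;
    // The next 8 bytes are the big-endian timeout timestamp.
    let bytes: [u8; 8] = after_room.get(..8)?.try_into().ok()?;
    Some(u64::from_be_bytes(bytes))
}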
Some files were not shown because too many files have changed in this diff.