Compare commits
2 commits
next
...
measure-ca
Author | SHA1 | Date | |
---|---|---|---|
|
84d54e0eaf | ||
|
927a23814b |
257 changed files with 22182 additions and 37386 deletions
|
@ -14,8 +14,6 @@ docker-compose*
|
||||||
# Git folder
|
# Git folder
|
||||||
.git
|
.git
|
||||||
.gitea
|
.gitea
|
||||||
.gitlab
|
|
||||||
.github
|
|
||||||
|
|
||||||
# Dot files
|
# Dot files
|
||||||
.env
|
.env
|
||||||
|
@ -25,4 +23,4 @@ docker-compose*
|
||||||
rustfmt.toml
|
rustfmt.toml
|
||||||
|
|
||||||
# Documentation
|
# Documentation
|
||||||
#*.md
|
*.md
|
||||||
|
|
|
@ -1,15 +0,0 @@
|
||||||
# EditorConfig is awesome: https://EditorConfig.org
|
|
||||||
|
|
||||||
root = true
|
|
||||||
|
|
||||||
[*]
|
|
||||||
charset = utf-8
|
|
||||||
end_of_line = lf
|
|
||||||
tab_width = 4
|
|
||||||
indent_size = 4
|
|
||||||
indent_style = space
|
|
||||||
insert_final_newline = true
|
|
||||||
max_line_length = 120
|
|
||||||
|
|
||||||
[*.nix]
|
|
||||||
indent_size = 2
|
|
5
.envrc
5
.envrc
|
@ -1,5 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
use flake
|
|
||||||
|
|
||||||
PATH_add bin
|
|
15
.gitignore
vendored
15
.gitignore
vendored
|
@ -31,6 +31,7 @@ modules.xml
|
||||||
|
|
||||||
### vscode ###
|
### vscode ###
|
||||||
.vscode/*
|
.vscode/*
|
||||||
|
!.vscode/settings.json
|
||||||
!.vscode/tasks.json
|
!.vscode/tasks.json
|
||||||
!.vscode/launch.json
|
!.vscode/launch.json
|
||||||
!.vscode/extensions.json
|
!.vscode/extensions.json
|
||||||
|
@ -56,21 +57,9 @@ $RECYCLE.BIN/
|
||||||
*.lnk
|
*.lnk
|
||||||
|
|
||||||
# Conduit
|
# Conduit
|
||||||
|
Rocket.toml
|
||||||
conduit.toml
|
conduit.toml
|
||||||
conduit.db
|
conduit.db
|
||||||
|
|
||||||
# Etc.
|
# Etc.
|
||||||
**/*.rs.bk
|
**/*.rs.bk
|
||||||
cached_target
|
|
||||||
|
|
||||||
# Nix artifacts
|
|
||||||
/result*
|
|
||||||
|
|
||||||
# Direnv cache
|
|
||||||
/.direnv
|
|
||||||
|
|
||||||
# Gitlab CI cache
|
|
||||||
/.gitlab-ci.d
|
|
||||||
|
|
||||||
# mdbook output
|
|
||||||
public/
|
|
508
.gitlab-ci.yml
508
.gitlab-ci.yml
|
@ -1,197 +1,343 @@
|
||||||
stages:
|
stages:
|
||||||
- ci
|
- build
|
||||||
- artifacts
|
- build docker image
|
||||||
- publish
|
- test
|
||||||
|
- upload artifacts
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
# Makes some things print in color
|
GIT_SUBMODULE_STRATEGY: recursive
|
||||||
TERM: ansi
|
FF_USE_FASTZIP: 1
|
||||||
# Faster cache and artifact compression / decompression
|
CACHE_COMPRESSION_LEVEL: fastest
|
||||||
FF_USE_FASTZIP: true
|
|
||||||
# Print progress reports for cache and artifact transfers
|
|
||||||
TRANSFER_METER_FREQUENCY: 5s
|
|
||||||
|
|
||||||
# Avoid duplicate pipelines
|
|
||||||
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
|
# --------------------------------------------------------------------- #
|
||||||
workflow:
|
# Cargo: Compiling for different architectures #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
.build-cargo-shared-settings:
|
||||||
|
stage: "build"
|
||||||
|
needs: []
|
||||||
rules:
|
rules:
|
||||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
|
||||||
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
|
interruptible: true
|
||||||
when: never
|
image: "rust:latest"
|
||||||
- if: $CI
|
tags: ["docker"]
|
||||||
|
cache:
|
||||||
before_script:
|
paths:
|
||||||
# Enable nix-command and flakes
|
- cargohome
|
||||||
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
|
- target/
|
||||||
|
key: "build_cache-$TARGET-release"
|
||||||
# Add our own binary cache
|
variables:
|
||||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://attic.conduit.rs/conduit" >> /etc/nix/nix.conf; fi
|
CARGO_PROFILE_RELEASE_LTO=true
|
||||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ddcaWZiWm0l0IXZlO8FERRdWvEufwmd0Negl1P+c0Ns=" >> /etc/nix/nix.conf; fi
|
CARGO_PROFILE_RELEASE_CODEGEN_UNITS=1
|
||||||
|
before_script:
|
||||||
# Add alternate binary cache
|
- 'echo "Building for target $TARGET"'
|
||||||
- if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
|
- 'mkdir -p cargohome && CARGOHOME="cargohome"'
|
||||||
- if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi
|
- "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
|
||||||
|
- 'apt-get update -yqq'
|
||||||
# Add crane binary cache
|
- 'echo "Installing packages: $NEEDED_PACKAGES"'
|
||||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
|
- "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES"
|
||||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
|
- "rustup target add $TARGET"
|
||||||
|
script:
|
||||||
# Add nix-community binary cache
|
- time cargo build --target $TARGET --release
|
||||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
|
- 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"'
|
||||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
|
|
||||||
|
|
||||||
# Install direnv and nix-direnv
|
build:release:cargo:x86_64-unknown-linux-gnu:
|
||||||
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
|
extends: .build-cargo-shared-settings
|
||||||
|
variables:
|
||||||
# Allow .envrc
|
TARGET: "x86_64-unknown-linux-gnu"
|
||||||
- if command -v nix > /dev/null; then direnv allow; fi
|
artifacts:
|
||||||
|
name: "conduit-x86_64-unknown-linux-gnu"
|
||||||
# Set CARGO_HOME to a cacheable path
|
paths:
|
||||||
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
|
- "conduit-x86_64-unknown-linux-gnu"
|
||||||
|
expose_as: "Conduit for x86_64-unknown-linux-gnu"
|
||||||
# Cache attic client
|
|
||||||
- if command -v nix > /dev/null; then ./bin/nix-build-and-cache --inputs-from . attic; fi
|
build:release:cargo:armv7-unknown-linux-gnueabihf:
|
||||||
|
extends: .build-cargo-shared-settings
|
||||||
ci:
|
variables:
|
||||||
stage: ci
|
TARGET: "armv7-unknown-linux-gnueabihf"
|
||||||
image: nixos/nix:2.22.0
|
NEEDED_PACKAGES: "build-essential gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross"
|
||||||
script:
|
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc
|
||||||
# Cache the inputs required for the devShell
|
CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc
|
||||||
- ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation
|
CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++
|
||||||
|
artifacts:
|
||||||
- direnv exec . engage
|
name: "conduit-armv7-unknown-linux-gnueabihf"
|
||||||
|
paths:
|
||||||
|
- "conduit-armv7-unknown-linux-gnueabihf"
|
||||||
|
expose_as: "Conduit for armv7-unknown-linux-gnueabihf"
|
||||||
|
|
||||||
|
build:release:cargo:aarch64-unknown-linux-gnu:
|
||||||
|
extends: .build-cargo-shared-settings
|
||||||
|
variables:
|
||||||
|
TARGET: "aarch64-unknown-linux-gnu"
|
||||||
|
NEEDED_PACKAGES: "build-essential gcc-8-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross"
|
||||||
|
CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc
|
||||||
|
CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc
|
||||||
|
CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++
|
||||||
|
TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-8"
|
||||||
|
TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-8"
|
||||||
|
artifacts:
|
||||||
|
name: "conduit-aarch64-unknown-linux-gnu"
|
||||||
|
paths:
|
||||||
|
- "conduit-aarch64-unknown-linux-gnu"
|
||||||
|
expose_as: "Conduit for aarch64-unknown-linux-gnu"
|
||||||
|
|
||||||
|
build:release:cargo:x86_64-unknown-linux-musl:
|
||||||
|
extends: .build-cargo-shared-settings
|
||||||
|
image: "rust:alpine"
|
||||||
|
variables:
|
||||||
|
TARGET: "x86_64-unknown-linux-musl"
|
||||||
|
before_script:
|
||||||
|
- 'echo "Building for target $TARGET"'
|
||||||
|
- 'mkdir -p cargohome && CARGOHOME="cargohome"'
|
||||||
|
- "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
|
||||||
|
- "rustup target add $TARGET"
|
||||||
|
- "apk add libc-dev"
|
||||||
|
artifacts:
|
||||||
|
name: "conduit-x86_64-unknown-linux-musl"
|
||||||
|
paths:
|
||||||
|
- "conduit-x86_64-unknown-linux-musl"
|
||||||
|
expose_as: "Conduit for x86_64-unknown-linux-musl"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
.cargo-debug-shared-settings:
|
||||||
|
extends: ".build-cargo-shared-settings"
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH'
|
||||||
|
cache:
|
||||||
|
key: "build_cache-$TARGET-debug"
|
||||||
|
script:
|
||||||
|
- "time cargo build --target $TARGET"
|
||||||
|
- 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"'
|
||||||
|
|
||||||
|
build:debug:cargo:x86_64-unknown-linux-gnu:
|
||||||
|
extends: ".cargo-debug-shared-settings"
|
||||||
|
variables:
|
||||||
|
TARGET: "x86_64-unknown-linux-gnu"
|
||||||
|
artifacts:
|
||||||
|
name: "conduit-debug-x86_64-unknown-linux-gnu"
|
||||||
|
paths:
|
||||||
|
- "conduit-debug-x86_64-unknown-linux-gnu"
|
||||||
|
expose_as: "Conduit DEBUG for x86_64-unknown-linux-gnu"
|
||||||
|
|
||||||
|
build:debug:cargo:x86_64-unknown-linux-musl:
|
||||||
|
extends: ".cargo-debug-shared-settings"
|
||||||
|
image: "rust:alpine"
|
||||||
|
variables:
|
||||||
|
TARGET: "x86_64-unknown-linux-musl"
|
||||||
|
before_script:
|
||||||
|
- 'echo "Building for target $TARGET"'
|
||||||
|
- 'mkdir -p cargohome && CARGOHOME="cargohome"'
|
||||||
|
- "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
|
||||||
|
- "rustup target add $TARGET"
|
||||||
|
- "apk add libc-dev"
|
||||||
|
artifacts:
|
||||||
|
name: "conduit-debug-x86_64-unknown-linux-musl"
|
||||||
|
paths:
|
||||||
|
- "conduit-debug-x86_64-unknown-linux-musl"
|
||||||
|
expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
# Cargo: Compiling deb packages for different architectures #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
|
||||||
|
.build-cargo-deb-shared-settings:
|
||||||
|
stage: "build"
|
||||||
|
needs: [ ]
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
|
||||||
|
interruptible: true
|
||||||
|
image: "rust:latest"
|
||||||
|
tags: ["docker"]
|
||||||
|
cache:
|
||||||
|
paths:
|
||||||
|
- cargohome
|
||||||
|
- target/
|
||||||
|
key: "build_cache-deb-$TARGET"
|
||||||
|
before_script:
|
||||||
|
- 'echo "Building debian package for target $TARGET"'
|
||||||
|
- 'mkdir -p cargohome && CARGOHOME="cargohome"'
|
||||||
|
- "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
|
||||||
|
- 'apt-get update -yqq'
|
||||||
|
- 'echo "Installing packages: $NEEDED_PACKAGES"'
|
||||||
|
- "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES"
|
||||||
|
- "rustup target add $TARGET"
|
||||||
|
- "cargo install cargo-deb"
|
||||||
|
script:
|
||||||
|
- time cargo deb --target $TARGET
|
||||||
|
- 'mv target/$TARGET/debian/*.deb "conduit-$TARGET.deb"'
|
||||||
|
|
||||||
|
build:cargo-deb:x86_64-unknown-linux-gnu:
|
||||||
|
extends: .build-cargo-deb-shared-settings
|
||||||
|
variables:
|
||||||
|
TARGET: "x86_64-unknown-linux-gnu"
|
||||||
|
NEEDED_PACKAGES: ""
|
||||||
|
artifacts:
|
||||||
|
name: "conduit-x86_64-unknown-linux-gnu.deb"
|
||||||
|
paths:
|
||||||
|
- "conduit-x86_64-unknown-linux-gnu.deb"
|
||||||
|
expose_as: "Debian Package x86_64"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
# Create and publish docker image #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
.docker-shared-settings:
|
||||||
|
stage: "build docker image"
|
||||||
|
needs: []
|
||||||
|
interruptible: true
|
||||||
|
image:
|
||||||
|
name: "gcr.io/kaniko-project/executor:debug"
|
||||||
|
entrypoint: [""]
|
||||||
|
tags: ["docker"]
|
||||||
|
variables:
|
||||||
|
# Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache
|
||||||
|
KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache"
|
||||||
|
before_script:
|
||||||
|
- "mkdir -p /kaniko/.docker"
|
||||||
|
- 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json'
|
||||||
|
|
||||||
|
|
||||||
|
# Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image
|
||||||
|
build:docker:main:
|
||||||
|
extends: .docker-shared-settings
|
||||||
|
needs:
|
||||||
|
- "build:release:cargo:x86_64-unknown-linux-musl"
|
||||||
|
script:
|
||||||
|
- >
|
||||||
|
/kaniko/executor
|
||||||
|
$KANIKO_CACHE_ARGS
|
||||||
|
--context $CI_PROJECT_DIR
|
||||||
|
--build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||||
|
--build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
|
||||||
|
--build-arg "GIT_REF=$CI_COMMIT_REF_NAME"
|
||||||
|
--dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile"
|
||||||
|
--destination "$CI_REGISTRY_IMAGE/conduit:latest"
|
||||||
|
--destination "$CI_REGISTRY_IMAGE/conduit:alpine"
|
||||||
|
--destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA"
|
||||||
|
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
|
||||||
|
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:alpine"
|
||||||
|
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA"
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
# Run tests #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
test:cargo:
|
||||||
|
stage: "test"
|
||||||
|
needs: [ ]
|
||||||
|
image: "rust:latest"
|
||||||
|
tags: [ "docker" ]
|
||||||
|
variables:
|
||||||
|
CARGO_HOME: "cargohome"
|
||||||
cache:
|
cache:
|
||||||
key: nix
|
|
||||||
paths:
|
paths:
|
||||||
- target
|
- target
|
||||||
- .gitlab-ci.d
|
- cargohome
|
||||||
rules:
|
key: test_cache
|
||||||
# CI on upstream runners (only available for maintainers)
|
|
||||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true"
|
|
||||||
# Manual CI on unprotected branches that are not MRs
|
|
||||||
- if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false"
|
|
||||||
when: manual
|
|
||||||
# Manual CI on forks
|
|
||||||
- if: $IS_UPSTREAM_CI != "true"
|
|
||||||
when: manual
|
|
||||||
- if: $CI
|
|
||||||
interruptible: true
|
interruptible: true
|
||||||
|
before_script:
|
||||||
artifacts:
|
- mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps"
|
||||||
stage: artifacts
|
- apt-get update -yqq
|
||||||
image: nixos/nix:2.22.0
|
- apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config
|
||||||
|
- rustup component add clippy rustfmt
|
||||||
script:
|
script:
|
||||||
- ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
|
- rustc --version && cargo --version # Print version info for debugging
|
||||||
- cp result/bin/conduit x86_64-unknown-linux-musl
|
- cargo fmt --all -- --check
|
||||||
|
- cargo test --workspace --verbose --locked
|
||||||
|
- cargo clippy
|
||||||
|
|
||||||
- mkdir -p target/release
|
test:sytest:
|
||||||
- cp result/bin/conduit target/release
|
stage: "test"
|
||||||
- direnv exec . cargo deb --no-build
|
|
||||||
- mv target/debian/*.deb x86_64-unknown-linux-musl.deb
|
|
||||||
|
|
||||||
# Since the OCI image package is based on the binary package, this has the
|
|
||||||
# fun side effect of uploading the normal binary too. Conduit users who are
|
|
||||||
# deploying with Nix can leverage this fact by adding our binary cache to
|
|
||||||
# their systems.
|
|
||||||
#
|
|
||||||
# Note that although we have an `oci-image-x86_64-unknown-linux-musl`
|
|
||||||
# output, we don't build it because it would be largely redundant to this
|
|
||||||
# one since it's all containerized anyway.
|
|
||||||
- ./bin/nix-build-and-cache .#oci-image
|
|
||||||
- cp result oci-image-amd64.tar.gz
|
|
||||||
|
|
||||||
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
|
|
||||||
- cp result/bin/conduit aarch64-unknown-linux-musl
|
|
||||||
|
|
||||||
- mkdir -p target/aarch64-unknown-linux-musl/release
|
|
||||||
- cp result/bin/conduit target/aarch64-unknown-linux-musl/release
|
|
||||||
- direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl
|
|
||||||
- mv target/aarch64-unknown-linux-musl/debian/*.deb aarch64-unknown-linux-musl.deb
|
|
||||||
|
|
||||||
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
|
|
||||||
- cp result oci-image-arm64v8.tar.gz
|
|
||||||
|
|
||||||
- ./bin/nix-build-and-cache .#book
|
|
||||||
# We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746
|
|
||||||
- cp -r --dereference result public
|
|
||||||
artifacts:
|
|
||||||
paths:
|
|
||||||
- x86_64-unknown-linux-musl
|
|
||||||
- aarch64-unknown-linux-musl
|
|
||||||
- x86_64-unknown-linux-musl.deb
|
|
||||||
- aarch64-unknown-linux-musl.deb
|
|
||||||
- oci-image-amd64.tar.gz
|
|
||||||
- oci-image-arm64v8.tar.gz
|
|
||||||
- public
|
|
||||||
rules:
|
|
||||||
# CI required for all MRs
|
|
||||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
|
||||||
# Optional CI on forks
|
|
||||||
- if: $IS_UPSTREAM_CI != "true"
|
|
||||||
when: manual
|
|
||||||
allow_failure: true
|
allow_failure: true
|
||||||
- if: $CI
|
needs:
|
||||||
|
- "build:debug:cargo:x86_64-unknown-linux-musl"
|
||||||
|
image:
|
||||||
|
name: "valkum/sytest-conduit:latest"
|
||||||
|
entrypoint: [ "" ]
|
||||||
|
tags: [ "docker" ]
|
||||||
|
variables:
|
||||||
|
PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz"
|
||||||
|
before_script:
|
||||||
|
- "mkdir -p /app"
|
||||||
|
- "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit"
|
||||||
|
- "chmod +x /app/conduit"
|
||||||
|
- "rm -rf /src && ln -s $CI_PROJECT_DIR/ /src"
|
||||||
|
- "mkdir -p /work/server-0/database/ && mkdir -p /work/server-1/database/ && mkdir -p /work/server-2/database/"
|
||||||
|
- "cd /"
|
||||||
|
script:
|
||||||
|
- "SYTEST_EXIT_CODE=0"
|
||||||
|
- "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1"
|
||||||
|
- "perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml \"Sytest\" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap"
|
||||||
|
- "exit $SYTEST_EXIT_CODE"
|
||||||
|
artifacts:
|
||||||
|
when: always
|
||||||
|
paths:
|
||||||
|
- "$CI_PROJECT_DIR/sytest.xml"
|
||||||
|
- "$CI_PROJECT_DIR/results.tap"
|
||||||
|
reports:
|
||||||
|
junit: "$CI_PROJECT_DIR/sytest.xml"
|
||||||
|
|
||||||
|
|
||||||
|
test:register:element-web-stable:
|
||||||
|
stage: "test"
|
||||||
|
needs:
|
||||||
|
- "build:debug:cargo:x86_64-unknown-linux-gnu"
|
||||||
|
image: "buildkite/puppeteer:latest"
|
||||||
|
tags: [ "docker" ]
|
||||||
interruptible: true
|
interruptible: true
|
||||||
|
|
||||||
.push-oci-image:
|
|
||||||
stage: publish
|
|
||||||
image: docker:25.0.0
|
|
||||||
services:
|
|
||||||
- docker:25.0.0-dind
|
|
||||||
variables:
|
|
||||||
IMAGE_SUFFIX_AMD64: amd64
|
|
||||||
IMAGE_SUFFIX_ARM64V8: arm64v8
|
|
||||||
script:
|
script:
|
||||||
- docker load -i oci-image-amd64.tar.gz
|
- "CONDUIT_CONFIG=tests/test-config.toml ./conduit-debug-x86_64-unknown-linux-gnu > conduit.log &"
|
||||||
- IMAGE_ID_AMD64=$(docker images -q conduit:next)
|
- "cd tests/client-element-web/"
|
||||||
- docker load -i oci-image-arm64v8.tar.gz
|
- "npm install puppeteer"
|
||||||
- IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
|
- "node test-element-web-registration.js \"https://app.element.io/\" \"http://localhost:6167\""
|
||||||
# Tag and push the architecture specific images
|
- "killall --regexp \"conduit\""
|
||||||
- docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
- "cd ../.."
|
||||||
- docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
- "cat conduit.log"
|
||||||
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
|
||||||
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
|
||||||
# Tag the multi-arch image
|
|
||||||
- docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
|
||||||
- docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
|
|
||||||
# Tag and push the git ref
|
|
||||||
- docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
|
||||||
- docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
|
|
||||||
# Tag git tags as 'latest'
|
|
||||||
- |
|
|
||||||
if [[ -n "$CI_COMMIT_TAG" ]]; then
|
|
||||||
docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
|
||||||
docker manifest push $IMAGE_NAME:latest
|
|
||||||
fi
|
|
||||||
dependencies:
|
|
||||||
- artifacts
|
|
||||||
only:
|
|
||||||
- next
|
|
||||||
- master
|
|
||||||
- tags
|
|
||||||
|
|
||||||
oci-image:push-gitlab:
|
|
||||||
extends: .push-oci-image
|
|
||||||
variables:
|
|
||||||
IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
|
|
||||||
before_script:
|
|
||||||
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
|
|
||||||
|
|
||||||
oci-image:push-dockerhub:
|
|
||||||
extends: .push-oci-image
|
|
||||||
variables:
|
|
||||||
IMAGE_NAME: matrixconduit/matrix-conduit
|
|
||||||
before_script:
|
|
||||||
- docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD
|
|
||||||
|
|
||||||
pages:
|
|
||||||
stage: publish
|
|
||||||
dependencies:
|
|
||||||
- artifacts
|
|
||||||
only:
|
|
||||||
- next
|
|
||||||
script:
|
|
||||||
- "true"
|
|
||||||
artifacts:
|
artifacts:
|
||||||
paths:
|
paths:
|
||||||
- public
|
- "tests/client-element-web/*.png"
|
||||||
|
- "*.log"
|
||||||
|
expire_in: 1 week
|
||||||
|
when: always
|
||||||
|
retry: 1
|
||||||
|
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
# Store binaries as package so they have download urls #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
publish:package:
|
||||||
|
stage: "upload artifacts"
|
||||||
|
needs:
|
||||||
|
- "build:release:cargo:x86_64-unknown-linux-gnu"
|
||||||
|
- "build:release:cargo:armv7-unknown-linux-gnueabihf"
|
||||||
|
- "build:release:cargo:aarch64-unknown-linux-gnu"
|
||||||
|
- "build:release:cargo:x86_64-unknown-linux-musl"
|
||||||
|
- "build:cargo-deb:x86_64-unknown-linux-gnu"
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
|
||||||
|
image: curlimages/curl:latest
|
||||||
|
tags: ["docker"]
|
||||||
|
variables:
|
||||||
|
GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
|
||||||
|
script:
|
||||||
|
- 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
|
||||||
|
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
|
||||||
|
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${BASE_URL}/conduit-armv7-unknown-linux-gnueabihf"'
|
||||||
|
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
|
||||||
|
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"'
|
||||||
|
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,5 +0,0 @@
|
||||||
# Nix things
|
|
||||||
.envrc @CobaltCause
|
|
||||||
flake.lock @CobaltCause
|
|
||||||
flake.nix @CobaltCause
|
|
||||||
nix/ @CobaltCause
|
|
|
@ -1,3 +0,0 @@
|
||||||
# Docs: Map markdown to html files
|
|
||||||
- source: /docs/(.+)\.md/
|
|
||||||
public: '\1.html'
|
|
|
@ -1,37 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
set -eux
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
# #
|
|
||||||
# Configures docker buildx to use a remote server for arm building. #
|
|
||||||
# Expects $SSH_PRIVATE_KEY to be a valid ssh ed25519 private key with #
|
|
||||||
# access to the server $ARM_SERVER_USER@$ARM_SERVER_IP #
|
|
||||||
# #
|
|
||||||
# This is expected to only be used in the official CI/CD pipeline! #
|
|
||||||
# #
|
|
||||||
# Requirements: openssh-client, docker buildx #
|
|
||||||
# Inspired by: https://depot.dev/blog/building-arm-containers #
|
|
||||||
# #
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
|
|
||||||
cat "$BUILD_SERVER_SSH_PRIVATE_KEY" | ssh-add -
|
|
||||||
|
|
||||||
# Test server connections:
|
|
||||||
ssh "$ARM_SERVER_USER@$ARM_SERVER_IP" "uname -a"
|
|
||||||
ssh "$AMD_SERVER_USER@$AMD_SERVER_IP" "uname -a"
|
|
||||||
|
|
||||||
# Connect remote arm64 server for all arm builds:
|
|
||||||
docker buildx create \
|
|
||||||
--name "multi" \
|
|
||||||
--driver "docker-container" \
|
|
||||||
--platform "linux/arm64,linux/arm/v7" \
|
|
||||||
"ssh://$ARM_SERVER_USER@$ARM_SERVER_IP"
|
|
||||||
|
|
||||||
# Connect remote amd64 server for adm64 builds:
|
|
||||||
docker buildx create --append \
|
|
||||||
--name "multi" \
|
|
||||||
--driver "docker-container" \
|
|
||||||
--platform "linux/amd64" \
|
|
||||||
"ssh://$AMD_SERVER_USER@$AMD_SERVER_IP"
|
|
||||||
|
|
||||||
docker buildx use multi
|
|
11
.vscode/extensions.json
vendored
11
.vscode/extensions.json
vendored
|
@ -1,11 +0,0 @@
|
||||||
{
|
|
||||||
"recommendations": [
|
|
||||||
"rust-lang.rust-analyzer",
|
|
||||||
"bungcip.better-toml",
|
|
||||||
"ms-azuretools.vscode-docker",
|
|
||||||
"eamodio.gitlens",
|
|
||||||
"serayuzgur.crates",
|
|
||||||
"vadimcn.vscode-lldb",
|
|
||||||
"timonwong.shellcheck"
|
|
||||||
]
|
|
||||||
}
|
|
35
.vscode/launch.json
vendored
35
.vscode/launch.json
vendored
|
@ -1,35 +0,0 @@
|
||||||
{
|
|
||||||
// Use IntelliSense to learn about possible attributes.
|
|
||||||
// Hover to view descriptions of existing attributes.
|
|
||||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
|
||||||
"version": "0.2.0",
|
|
||||||
"configurations": [
|
|
||||||
{
|
|
||||||
"type": "lldb",
|
|
||||||
"request": "launch",
|
|
||||||
"name": "Debug conduit",
|
|
||||||
"sourceLanguages": ["rust"],
|
|
||||||
"cargo": {
|
|
||||||
"args": [
|
|
||||||
"build",
|
|
||||||
"--bin=conduit",
|
|
||||||
"--package=conduit"
|
|
||||||
],
|
|
||||||
"filter": {
|
|
||||||
"name": "conduit",
|
|
||||||
"kind": "bin"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"args": [],
|
|
||||||
"env": {
|
|
||||||
"RUST_BACKTRACE": "1",
|
|
||||||
"CONDUIT_CONFIG": "",
|
|
||||||
"CONDUIT_SERVER_NAME": "localhost",
|
|
||||||
"CONDUIT_DATABASE_PATH": "/tmp",
|
|
||||||
"CONDUIT_ADDRESS": "0.0.0.0",
|
|
||||||
"CONDUIT_PORT": "6167"
|
|
||||||
},
|
|
||||||
"cwd": "${workspaceFolder}"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
3
.vscode/settings.json
vendored
Normal file
3
.vscode/settings.json
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
{
|
||||||
|
"rust-analyzer.procMacro.enable": true
|
||||||
|
}
|
|
@ -2,9 +2,16 @@
|
||||||
|
|
||||||
## Getting help
|
## Getting help
|
||||||
|
|
||||||
If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
|
If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
|
||||||
|
|
||||||
## Set up the appservice - general instructions
|
## Tested appservices
|
||||||
|
|
||||||
|
Here are some appservices we tested and that work with Conduit:
|
||||||
|
- matrix-appservice-discord
|
||||||
|
- mautrix-hangouts
|
||||||
|
- mautrix-telegram
|
||||||
|
|
||||||
|
## Set up the appservice
|
||||||
|
|
||||||
Follow whatever instructions are given by the appservice. This usually includes
|
Follow whatever instructions are given by the appservice. This usually includes
|
||||||
downloading, changing its config (setting domain, homeserver url, port etc.)
|
downloading, changing its config (setting domain, homeserver url, port etc.)
|
||||||
|
@ -18,7 +25,7 @@ First, go into the #admins room of your homeserver. The first person that
|
||||||
registered on the homeserver automatically joins it. Then send a message into
|
registered on the homeserver automatically joins it. Then send a message into
|
||||||
the room like this:
|
the room like this:
|
||||||
|
|
||||||
@conduit:your.server.name: register-appservice
|
@conduit:your.server.name: register_appservice
|
||||||
```
|
```
|
||||||
paste
|
paste
|
||||||
the
|
the
|
||||||
|
@ -31,7 +38,7 @@ the room like this:
|
||||||
```
|
```
|
||||||
|
|
||||||
You can confirm it worked by sending a message like this:
|
You can confirm it worked by sending a message like this:
|
||||||
`@conduit:your.server.name: list-appservices`
|
`@conduit:your.server.name: list_appservices`
|
||||||
|
|
||||||
The @conduit bot should answer with `Appservices (1): your-bridge`
|
The @conduit bot should answer with `Appservices (1): your-bridge`
|
||||||
|
|
||||||
|
@ -39,23 +46,3 @@ Then you are done. Conduit will send messages to the appservices and the
|
||||||
appservice can send requests to the homeserver. You don't need to restart
|
appservice can send requests to the homeserver. You don't need to restart
|
||||||
Conduit, but if it doesn't work, restarting while the appservice is running
|
Conduit, but if it doesn't work, restarting while the appservice is running
|
||||||
could help.
|
could help.
|
||||||
|
|
||||||
## Appservice-specific instructions
|
|
||||||
|
|
||||||
### Remove an appservice
|
|
||||||
|
|
||||||
To remove an appservice go to your admin room and execute
|
|
||||||
|
|
||||||
`@conduit:your.server.name: unregister-appservice <name>`
|
|
||||||
|
|
||||||
where `<name>` one of the output of `list-appservices`.
|
|
||||||
|
|
||||||
### Tested appservices
|
|
||||||
|
|
||||||
These appservices have been tested and work with Conduit without any extra steps:
|
|
||||||
|
|
||||||
- [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord)
|
|
||||||
- [mautrix-hangouts](https://github.com/mautrix/hangouts/)
|
|
||||||
- [mautrix-telegram](https://github.com/mautrix/telegram/)
|
|
||||||
- [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward.
|
|
||||||
- [heisenbridge](https://github.com/hifi/heisenbridge/)
|
|
|
@ -1,134 +0,0 @@
|
||||||
|
|
||||||
# Contributor Covenant Code of Conduct
|
|
||||||
|
|
||||||
## Our Pledge
|
|
||||||
|
|
||||||
We as members, contributors, and leaders pledge to make participation in our
|
|
||||||
community a harassment-free experience for everyone, regardless of age, body
|
|
||||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
|
||||||
identity and expression, level of experience, education, socio-economic status,
|
|
||||||
nationality, personal appearance, race, caste, color, religion, or sexual
|
|
||||||
identity and orientation.
|
|
||||||
|
|
||||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
|
||||||
diverse, inclusive, and healthy community.
|
|
||||||
|
|
||||||
## Our Standards
|
|
||||||
|
|
||||||
Examples of behavior that contributes to a positive environment for our
|
|
||||||
community include:
|
|
||||||
|
|
||||||
* Demonstrating empathy and kindness toward other people
|
|
||||||
* Being respectful of differing opinions, viewpoints, and experiences
|
|
||||||
* Giving and gracefully accepting constructive feedback
|
|
||||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
|
||||||
and learning from the experience
|
|
||||||
* Focusing on what is best not just for us as individuals, but for the overall
|
|
||||||
community
|
|
||||||
|
|
||||||
Examples of unacceptable behavior include:
|
|
||||||
|
|
||||||
* The use of sexualized language or imagery, and sexual attention or advances of
|
|
||||||
any kind
|
|
||||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
|
||||||
* Public or private harassment
|
|
||||||
* Publishing others' private information, such as a physical or email address,
|
|
||||||
without their explicit permission
|
|
||||||
* Other conduct which could reasonably be considered inappropriate in a
|
|
||||||
professional setting
|
|
||||||
|
|
||||||
## Enforcement Responsibilities
|
|
||||||
|
|
||||||
Community leaders are responsible for clarifying and enforcing our standards of
|
|
||||||
acceptable behavior and will take appropriate and fair corrective action in
|
|
||||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
|
||||||
or harmful.
|
|
||||||
|
|
||||||
Community leaders have the right and responsibility to remove, edit, or reject
|
|
||||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
|
||||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
|
||||||
decisions when appropriate.
|
|
||||||
|
|
||||||
## Scope
|
|
||||||
|
|
||||||
This Code of Conduct applies within all community spaces, and also applies when
|
|
||||||
an individual is officially representing the community in public spaces.
|
|
||||||
Examples of representing our community include using an official e-mail address,
|
|
||||||
posting via an official social media account, or acting as an appointed
|
|
||||||
representative at an online or offline event.
|
|
||||||
|
|
||||||
## Enforcement
|
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
|
||||||
reported to the community leaders responsible for enforcement over email at
|
|
||||||
coc@koesters.xyz or over Matrix at @timo:conduit.rs.
|
|
||||||
All complaints will be reviewed and investigated promptly and fairly.
|
|
||||||
|
|
||||||
All community leaders are obligated to respect the privacy and security of the
|
|
||||||
reporter of any incident.
|
|
||||||
|
|
||||||
## Enforcement Guidelines
|
|
||||||
|
|
||||||
Community leaders will follow these Community Impact Guidelines in determining
|
|
||||||
the consequences for any action they deem in violation of this Code of Conduct:
|
|
||||||
|
|
||||||
### 1. Correction
|
|
||||||
|
|
||||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
|
||||||
unprofessional or unwelcome in the community.
|
|
||||||
|
|
||||||
**Consequence**: A private, written warning from community leaders, providing
|
|
||||||
clarity around the nature of the violation and an explanation of why the
|
|
||||||
behavior was inappropriate. A public apology may be requested.
|
|
||||||
|
|
||||||
### 2. Warning
|
|
||||||
|
|
||||||
**Community Impact**: A violation through a single incident or series of
|
|
||||||
actions.
|
|
||||||
|
|
||||||
**Consequence**: A warning with consequences for continued behavior. No
|
|
||||||
interaction with the people involved, including unsolicited interaction with
|
|
||||||
those enforcing the Code of Conduct, for a specified period of time. This
|
|
||||||
includes avoiding interactions in community spaces as well as external channels
|
|
||||||
like social media. Violating these terms may lead to a temporary or permanent
|
|
||||||
ban.
|
|
||||||
|
|
||||||
### 3. Temporary Ban
|
|
||||||
|
|
||||||
**Community Impact**: A serious violation of community standards, including
|
|
||||||
sustained inappropriate behavior.
|
|
||||||
|
|
||||||
**Consequence**: A temporary ban from any sort of interaction or public
|
|
||||||
communication with the community for a specified period of time. No public or
|
|
||||||
private interaction with the people involved, including unsolicited interaction
|
|
||||||
with those enforcing the Code of Conduct, is allowed during this period.
|
|
||||||
Violating these terms may lead to a permanent ban.
|
|
||||||
|
|
||||||
### 4. Permanent Ban
|
|
||||||
|
|
||||||
**Community Impact**: Demonstrating a pattern of violation of community
|
|
||||||
standards, including sustained inappropriate behavior, harassment of an
|
|
||||||
individual, or aggression toward or disparagement of classes of individuals.
|
|
||||||
|
|
||||||
**Consequence**: A permanent ban from any sort of public interaction within the
|
|
||||||
community.
|
|
||||||
|
|
||||||
## Attribution
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
|
||||||
version 2.1, available at
|
|
||||||
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
|
||||||
|
|
||||||
Community Impact Guidelines were inspired by
|
|
||||||
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
|
||||||
|
|
||||||
For answers to common questions about this code of conduct, see the FAQ at
|
|
||||||
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
|
||||||
[https://www.contributor-covenant.org/translations][translations].
|
|
||||||
|
|
||||||
[homepage]: https://www.contributor-covenant.org
|
|
||||||
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
|
||||||
[Mozilla CoC]: https://github.com/mozilla/diversity
|
|
||||||
[FAQ]: https://www.contributor-covenant.org/faq
|
|
||||||
[translations]: https://www.contributor-covenant.org/translations
|
|
||||||
|
|
11
CROSS_COMPILE.md
Normal file
11
CROSS_COMPILE.md
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
Install docker:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ sudo apt install docker
|
||||||
|
$ sudo usermod -aG docker $USER
|
||||||
|
$ exec sudo su -l $USER
|
||||||
|
$ sudo systemctl start docker
|
||||||
|
$ cargo install cross
|
||||||
|
$ cross build --release --target armv7-unknown-linux-musleabihf
|
||||||
|
```
|
||||||
|
The cross-compiled binary is at target/armv7-unknown-linux-musleabihf/release/conduit
|
3800
Cargo.lock
generated
3800
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
292
Cargo.toml
292
Cargo.toml
|
@ -1,191 +1,92 @@
|
||||||
[workspace.lints.rust]
|
|
||||||
explicit_outlives_requirements = "warn"
|
|
||||||
unused_qualifications = "warn"
|
|
||||||
|
|
||||||
[workspace.lints.clippy]
|
|
||||||
cloned_instead_of_copied = "warn"
|
|
||||||
dbg_macro = "warn"
|
|
||||||
str_to_string = "warn"
|
|
||||||
|
|
||||||
[package]
|
[package]
|
||||||
authors = ["timokoesters <timo@koesters.xyz>"]
|
|
||||||
description = "A Matrix homeserver written in Rust"
|
|
||||||
edition = "2021"
|
|
||||||
homepage = "https://conduit.rs"
|
|
||||||
license = "Apache-2.0"
|
|
||||||
name = "conduit"
|
name = "conduit"
|
||||||
readme = "README.md"
|
description = "A Matrix homeserver written in Rust"
|
||||||
|
license = "Apache-2.0"
|
||||||
|
authors = ["timokoesters <timo@koesters.xyz>"]
|
||||||
|
homepage = "https://conduit.rs"
|
||||||
repository = "https://gitlab.com/famedly/conduit"
|
repository = "https://gitlab.com/famedly/conduit"
|
||||||
version = "0.10.0-alpha"
|
readme = "README.md"
|
||||||
|
version = "0.1.0"
|
||||||
# See also `rust-toolchain.toml`
|
edition = "2018"
|
||||||
rust-version = "1.79.0"
|
|
||||||
|
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[lints]
|
|
||||||
workspace = true
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
# Web framework
|
# Used to handle requests
|
||||||
axum = { version = "0.7", default-features = false, features = [
|
# TODO: This can become optional as soon as proper configs are supported
|
||||||
"form",
|
# rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests
|
||||||
"http1",
|
rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests
|
||||||
"http2",
|
|
||||||
"json",
|
|
||||||
"matched-path",
|
|
||||||
], optional = true }
|
|
||||||
axum-extra = { version = "0.9", features = ["typed-header"] }
|
|
||||||
axum-server = { version = "0.6", features = ["tls-rustls"] }
|
|
||||||
tower = { version = "0.4.13", features = ["util"] }
|
|
||||||
tower-http = { version = "0.5", features = [
|
|
||||||
"add-extension",
|
|
||||||
"cors",
|
|
||||||
"sensitive-headers",
|
|
||||||
"trace",
|
|
||||||
"util",
|
|
||||||
] }
|
|
||||||
tower-service = "0.3"
|
|
||||||
|
|
||||||
# Async runtime and utilities
|
|
||||||
tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }
|
|
||||||
# Used for storing data permanently
|
|
||||||
#sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true }
|
|
||||||
#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
|
|
||||||
persy = { version = "1.4.4", optional = true, features = ["background_ops"] }
|
|
||||||
|
|
||||||
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
|
||||||
bytes = "1.4.0"
|
|
||||||
http = "1"
|
|
||||||
# Used to find data directory for default db path
|
|
||||||
directories = "5"
|
|
||||||
# Used for ruma wrapper
|
|
||||||
serde_json = { version = "1.0.96", features = ["raw_value"] }
|
|
||||||
# Used for appservice registration files
|
|
||||||
serde_yaml = "0.9.21"
|
|
||||||
# Used for pdu definition
|
|
||||||
serde = { version = "1.0.163", features = ["rc"] }
|
|
||||||
# Used for secure identifiers
|
|
||||||
rand = "0.8.5"
|
|
||||||
# Used to hash passwords
|
|
||||||
rust-argon2 = "2"
|
|
||||||
# Used to send requests
|
|
||||||
hyper = "1.1"
|
|
||||||
hyper-util = { version = "0.1", features = [
|
|
||||||
"client",
|
|
||||||
"client-legacy",
|
|
||||||
"http1",
|
|
||||||
"http2",
|
|
||||||
] }
|
|
||||||
reqwest = { version = "0.12", default-features = false, features = [
|
|
||||||
"rustls-tls-native-roots",
|
|
||||||
"socks",
|
|
||||||
] }
|
|
||||||
# Used for conduit::Error type
|
|
||||||
thiserror = "1.0.40"
|
|
||||||
# Used to generate thumbnails for images
|
|
||||||
image = { version = "0.25", default-features = false, features = [
|
|
||||||
"gif",
|
|
||||||
"jpeg",
|
|
||||||
"png",
|
|
||||||
] }
|
|
||||||
# Used to encode server public key
|
|
||||||
base64 = "0.22"
|
|
||||||
# Used when hashing the state
|
|
||||||
ring = "0.17.7"
|
|
||||||
# Used when querying the SRV record of other servers
|
|
||||||
hickory-resolver = "0.24"
|
|
||||||
# Used to find matching events for appservices
|
|
||||||
regex = "1.8.1"
|
|
||||||
# jwt jsonwebtokens
|
|
||||||
jsonwebtoken = "9.2.0"
|
|
||||||
# Performance measurements
|
|
||||||
opentelemetry = "0.22"
|
|
||||||
opentelemetry-jaeger-propagator = "0.1"
|
|
||||||
opentelemetry-otlp = "0.15"
|
|
||||||
opentelemetry_sdk = { version = "0.22", features = ["rt-tokio"] }
|
|
||||||
tracing = "0.1.37"
|
|
||||||
tracing-flame = "0.2.0"
|
|
||||||
tracing-opentelemetry = "0.23"
|
|
||||||
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
|
|
||||||
|
|
||||||
lru-cache = "0.1.2"
|
|
||||||
parking_lot = { version = "0.12.1", optional = true }
|
|
||||||
rusqlite = { version = "0.31", optional = true, features = ["bundled"] }
|
|
||||||
|
|
||||||
# crossbeam = { version = "0.8.2", optional = true }
|
|
||||||
num_cpus = "1.15.0"
|
|
||||||
threadpool = "1.8.1"
|
|
||||||
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
|
||||||
# Used for ruma wrapper
|
|
||||||
serde_html_form = "0.2.0"
|
|
||||||
|
|
||||||
thread_local = "1.1.7"
|
|
||||||
# used for TURN server authentication
|
|
||||||
hmac = "0.12.1"
|
|
||||||
sha-1 = "0.10.1"
|
|
||||||
# used for conduit's CLI and admin room command parsing
|
|
||||||
clap = { version = "4.3.0", default-features = false, features = [
|
|
||||||
"derive",
|
|
||||||
"error-context",
|
|
||||||
"help",
|
|
||||||
"std",
|
|
||||||
"string",
|
|
||||||
"usage",
|
|
||||||
] }
|
|
||||||
futures-util = { version = "0.3.28", default-features = false }
|
|
||||||
# Used for reading the configuration from conduit.toml & environment variables
|
|
||||||
figment = { version = "0.10.8", features = ["env", "toml"] }
|
|
||||||
|
|
||||||
# Validating urls in config
|
|
||||||
url = { version = "2", features = ["serde"] }
|
|
||||||
|
|
||||||
async-trait = "0.1.68"
|
|
||||||
tikv-jemallocator = { version = "0.5.0", features = [
|
|
||||||
"unprefixed_malloc_on_supported_platforms",
|
|
||||||
], optional = true }
|
|
||||||
|
|
||||||
sd-notify = { version = "0.4.1", optional = true }
|
|
||||||
|
|
||||||
# Used for matrix spec type definitions and helpers
|
# Used for matrix spec type definitions and helpers
|
||||||
[dependencies.ruma]
|
#ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||||
features = [
|
ruma = { git = "https://github.com/timokoesters/ruma", rev = "2215049b60a1c3358f5a52215adf1e7bb88619a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||||
"appservice-api-c",
|
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||||
"client-api",
|
|
||||||
"compat",
|
|
||||||
"federation-api",
|
|
||||||
"push-gateway-api-c",
|
|
||||||
"rand",
|
|
||||||
"ring-compat",
|
|
||||||
"server-util",
|
|
||||||
"state-res",
|
|
||||||
"unstable-exhaustive-types",
|
|
||||||
"unstable-msc2448",
|
|
||||||
"unstable-msc3575",
|
|
||||||
"unstable-unspecified",
|
|
||||||
]
|
|
||||||
git = "https://github.com/ruma/ruma"
|
|
||||||
|
|
||||||
[dependencies.rocksdb]
|
# Used for long polling and federation sender, should be the same as rocket::tokio
|
||||||
features = ["lz4", "multi-threaded-cf", "zstd"]
|
tokio = "1.8.2"
|
||||||
optional = true
|
# Used for storing data permanently
|
||||||
package = "rust-rocksdb"
|
sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true }
|
||||||
version = "0.25"
|
#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
|
||||||
|
|
||||||
[target.'cfg(unix)'.dependencies]
|
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
||||||
nix = { version = "0.28", features = ["resource"] }
|
bytes = "1.0.1"
|
||||||
|
# Used for rocket<->ruma conversions
|
||||||
|
http = "0.2.4"
|
||||||
|
# Used to find data directory for default db path
|
||||||
|
directories = "3.0.2"
|
||||||
|
# Used for ruma wrapper
|
||||||
|
serde_json = { version = "1.0.64", features = ["raw_value"] }
|
||||||
|
# Used for appservice registration files
|
||||||
|
serde_yaml = "0.8.17"
|
||||||
|
# Used for pdu definition
|
||||||
|
serde = "1.0.126"
|
||||||
|
# Used for secure identifiers
|
||||||
|
rand = "0.8.4"
|
||||||
|
# Used to hash passwords
|
||||||
|
rust-argon2 = "0.8.3"
|
||||||
|
# Used to send requests
|
||||||
|
reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
|
||||||
|
# Custom TLS verifier
|
||||||
|
rustls = { version = "0.19.1", features = ["dangerous_configuration"] }
|
||||||
|
rustls-native-certs = "0.5.0"
|
||||||
|
webpki = "0.21.0"
|
||||||
|
# Used for conduit::Error type
|
||||||
|
thiserror = "1.0.26"
|
||||||
|
# Used to generate thumbnails for images
|
||||||
|
image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] }
|
||||||
|
# Used to encode server public key
|
||||||
|
base64 = "0.13.0"
|
||||||
|
# Used when hashing the state
|
||||||
|
ring = "0.16.20"
|
||||||
|
# Used when querying the SRV record of other servers
|
||||||
|
trust-dns-resolver = "0.20.3"
|
||||||
|
# Used to find matching events for appservices
|
||||||
|
regex = "1.5.4"
|
||||||
|
# jwt jsonwebtokens
|
||||||
|
jsonwebtoken = "7.2.0"
|
||||||
|
# Performance measurements
|
||||||
|
tracing = { version = "0.1.26", features = ["release_max_level_warn"] }
|
||||||
|
tracing-subscriber = "0.2.19"
|
||||||
|
tracing-opentelemetry = "0.14.0"
|
||||||
|
tracing-flame = "0.1.0"
|
||||||
|
opentelemetry = { version = "0.16.0", features = ["rt-tokio"] }
|
||||||
|
opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] }
|
||||||
|
pretty_env_logger = "0.4.0"
|
||||||
|
lru-cache = "0.1.2"
|
||||||
|
rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] }
|
||||||
|
parking_lot = { version = "0.11.1", optional = true }
|
||||||
|
crossbeam = { version = "0.8.1", optional = true }
|
||||||
|
num_cpus = "1.13.0"
|
||||||
|
threadpool = "1.8.1"
|
||||||
|
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["backend_rocksdb", "backend_sqlite", "conduit_bin", "systemd"]
|
default = ["conduit_bin", "backend_sqlite"]
|
||||||
#backend_sled = ["sled"]
|
backend_sled = ["sled"]
|
||||||
backend_persy = ["parking_lot", "persy"]
|
|
||||||
backend_sqlite = ["sqlite"]
|
backend_sqlite = ["sqlite"]
|
||||||
#backend_heed = ["heed", "crossbeam"]
|
backend_heed = ["heed", "crossbeam"]
|
||||||
backend_rocksdb = ["rocksdb"]
|
sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"]
|
||||||
conduit_bin = ["axum"]
|
conduit_bin = [] # TODO: add rocket to this when it is optional
|
||||||
jemalloc = ["tikv-jemallocator"]
|
|
||||||
sqlite = ["parking_lot", "rusqlite", "tokio/signal"]
|
|
||||||
systemd = ["sd-notify"]
|
|
||||||
|
|
||||||
[[bin]]
|
[[bin]]
|
||||||
name = "conduit"
|
name = "conduit"
|
||||||
|
@ -197,45 +98,36 @@ name = "conduit"
|
||||||
path = "src/lib.rs"
|
path = "src/lib.rs"
|
||||||
|
|
||||||
[package.metadata.deb]
|
[package.metadata.deb]
|
||||||
assets = [
|
name = "matrix-conduit"
|
||||||
[
|
maintainer = "Paul van Tilburg <paul@luon.net>"
|
||||||
"README.md",
|
|
||||||
"usr/share/doc/matrix-conduit/",
|
|
||||||
"644",
|
|
||||||
],
|
|
||||||
[
|
|
||||||
"debian/README.md",
|
|
||||||
"usr/share/doc/matrix-conduit/README.Debian",
|
|
||||||
"644",
|
|
||||||
],
|
|
||||||
[
|
|
||||||
"target/release/conduit",
|
|
||||||
"usr/sbin/matrix-conduit",
|
|
||||||
"755",
|
|
||||||
],
|
|
||||||
]
|
|
||||||
conf-files = ["/etc/matrix-conduit/conduit.toml"]
|
|
||||||
copyright = "2020, Timo Kösters <timo@koesters.xyz>"
|
copyright = "2020, Timo Kösters <timo@koesters.xyz>"
|
||||||
|
license-file = ["LICENSE", "3"]
|
||||||
depends = "$auto, ca-certificates"
|
depends = "$auto, ca-certificates"
|
||||||
extended-description = """\
|
extended-description = """\
|
||||||
A fast Matrix homeserver that is optimized for smaller, personal servers, \
|
A fast Matrix homeserver that is optimized for smaller, personal servers, \
|
||||||
instead of a server that has high scalability."""
|
instead of a server that has high scalability."""
|
||||||
license-file = ["LICENSE", "3"]
|
|
||||||
maintainer = "Paul van Tilburg <paul@luon.net>"
|
|
||||||
maintainer-scripts = "debian/"
|
|
||||||
name = "matrix-conduit"
|
|
||||||
priority = "optional"
|
|
||||||
section = "net"
|
section = "net"
|
||||||
|
priority = "optional"
|
||||||
|
assets = [
|
||||||
|
["debian/README.Debian", "usr/share/doc/matrix-conduit/", "644"],
|
||||||
|
["README.md", "usr/share/doc/matrix-conduit/", "644"],
|
||||||
|
["target/release/conduit", "usr/sbin/matrix-conduit", "755"],
|
||||||
|
]
|
||||||
|
conf-files = [
|
||||||
|
"/etc/matrix-conduit/conduit.toml"
|
||||||
|
]
|
||||||
|
maintainer-scripts = "debian/"
|
||||||
systemd-units = { unit-name = "matrix-conduit" }
|
systemd-units = { unit-name = "matrix-conduit" }
|
||||||
|
|
||||||
[profile.dev]
|
[profile.dev]
|
||||||
|
lto = 'thin'
|
||||||
incremental = true
|
incremental = true
|
||||||
lto = 'off'
|
|
||||||
|
|
||||||
[profile.release]
|
[profile.release]
|
||||||
codegen-units = 32
|
|
||||||
incremental = true
|
|
||||||
lto = 'thin'
|
lto = 'thin'
|
||||||
|
incremental = true
|
||||||
|
|
||||||
|
codegen-units=32
|
||||||
# If you want to make flamegraphs, enable debug info:
|
# If you want to make flamegraphs, enable debug info:
|
||||||
# debug = true
|
# debug = true
|
||||||
|
|
||||||
|
|
234
DEPLOY.md
Normal file
234
DEPLOY.md
Normal file
|
@ -0,0 +1,234 @@
|
||||||
|
# Deploying Conduit
|
||||||
|
|
||||||
|
## Getting help
|
||||||
|
|
||||||
|
If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
|
||||||
|
|
||||||
|
## Installing Conduit
|
||||||
|
|
||||||
|
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
|
||||||
|
|
||||||
|
| CPU Architecture | GNU (Ubuntu, Debian, ArchLinux, ...) | MUSL (Alpine, ... ) |
|
||||||
|
| -------------------- | ------------------------------------- | ----------------------- |
|
||||||
|
| x84_64 / amd64 | [Download][x84_64-gnu] | [Download][x84_64-musl] |
|
||||||
|
| armv7 (Raspberry Pi) | [Download][armv7-gnu] | - |
|
||||||
|
| armv8 / aarch64 | [Download][armv8-gnu] | - |
|
||||||
|
|
||||||
|
[x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:release:cargo:x86_64-unknown-linux-gnu
|
||||||
|
|
||||||
|
[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
|
||||||
|
|
||||||
|
[armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:release:cargo:armv7-unknown-linux-gnueabihf
|
||||||
|
|
||||||
|
[armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:release:cargo:aarch64-unknown-linux-gnu
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
|
||||||
|
$ sudo chmod +x /usr/local/bin/matrix-conduit
|
||||||
|
```
|
||||||
|
|
||||||
|
Alternatively, you may compile the binary yourself using
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ cargo build --release
|
||||||
|
```
|
||||||
|
Note that this currently requires Rust 1.50.
|
||||||
|
|
||||||
|
If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md).
|
||||||
|
|
||||||
|
|
||||||
|
## Adding a Conduit user
|
||||||
|
|
||||||
|
While Conduit can run as any user it is usually better to use dedicated users for different services.
|
||||||
|
This also allows you to make sure that the file permissions are correctly set up.
|
||||||
|
|
||||||
|
In Debian you can use this command to create a Conduit user:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo adduser --system conduit --no-create-home
|
||||||
|
```
|
||||||
|
|
||||||
|
## Setting up a systemd service
|
||||||
|
|
||||||
|
Now we'll set up a systemd service for Conduit, so it's easy to start/stop
|
||||||
|
Conduit and set it to autostart when your server reboots. Simply paste the
|
||||||
|
default systemd service you can find below into
|
||||||
|
`/etc/systemd/system/conduit.service`.
|
||||||
|
|
||||||
|
```systemd
|
||||||
|
[Unit]
|
||||||
|
Description=Conduit Matrix Server
|
||||||
|
After=network.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
|
||||||
|
User=conduit
|
||||||
|
Group=nogroup
|
||||||
|
Restart=always
|
||||||
|
ExecStart=/usr/local/bin/matrix-conduit
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally, run
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ sudo systemctl daemon-reload
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Creating the Conduit configuration file
|
||||||
|
|
||||||
|
Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment to read it. You need to change at least the server name.**
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[global]
|
||||||
|
# The server_name is the name of this server. It is used as a suffix for user
|
||||||
|
# and room ids. Examples: matrix.org, conduit.rs
|
||||||
|
# The Conduit server needs to be reachable at https://your.server.name/ on port
|
||||||
|
# 443 (client-server) and 8448 (federation) OR you can create /.well-known
|
||||||
|
# files to redirect requests. See
|
||||||
|
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
||||||
|
# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
||||||
|
# for more information
|
||||||
|
|
||||||
|
# YOU NEED TO EDIT THIS
|
||||||
|
#server_name = "your.server.name"
|
||||||
|
|
||||||
|
# This is the only directory where Conduit will save its data
|
||||||
|
database_path = "/var/lib/matrix-conduit/conduit_db"
|
||||||
|
|
||||||
|
# The port Conduit will be running on. You need to set up a reverse proxy in
|
||||||
|
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
||||||
|
# 443 and 8448 will be forwarded to the Conduit instance running on this port
|
||||||
|
port = 6167
|
||||||
|
|
||||||
|
# Max size for uploads
|
||||||
|
max_request_size = 20_000_000 # in bytes
|
||||||
|
|
||||||
|
# Disabling registration means no new users will be able to register on this server
|
||||||
|
allow_registration = false
|
||||||
|
|
||||||
|
# Disable encryption, so no new encrypted rooms can be created
|
||||||
|
# Note: existing rooms will continue to work
|
||||||
|
allow_encryption = true
|
||||||
|
allow_federation = true
|
||||||
|
|
||||||
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
|
#workers = 4 # default: cpu core count * 2
|
||||||
|
|
||||||
|
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
||||||
|
|
||||||
|
# The total amount of memory that the database will use.
|
||||||
|
#db_cache_capacity_mb = 200
|
||||||
|
```
|
||||||
|
|
||||||
|
## Setting the correct file permissions
|
||||||
|
|
||||||
|
As we are using a Conduit specific user we need to allow it to read the config.
|
||||||
|
To do that you can run this command on Debian:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo chown -R conduit:nogroup /etc/matrix-conduit
|
||||||
|
```
|
||||||
|
|
||||||
|
If you use the default database path you also need to run this:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo mkdir -p /var/lib/matrix-conduit/conduit_db
|
||||||
|
sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Setting up the Reverse Proxy
|
||||||
|
|
||||||
|
This depends on whether you use Apache, Nginx or another web server.
|
||||||
|
|
||||||
|
### Apache
|
||||||
|
|
||||||
|
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
|
||||||
|
|
||||||
|
```
|
||||||
|
Listen 8448
|
||||||
|
|
||||||
|
<VirtualHost *:443 *:8448>
|
||||||
|
|
||||||
|
ServerName your.server.name # EDIT THIS
|
||||||
|
|
||||||
|
AllowEncodedSlashes NoDecode
|
||||||
|
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon
|
||||||
|
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
|
||||||
|
|
||||||
|
Include /etc/letsencrypt/options-ssl-apache.conf
|
||||||
|
SSLCertificateFile /etc/letsencrypt/live/your.server.name/fullchain.pem # EDIT THIS
|
||||||
|
SSLCertificateKeyFile /etc/letsencrypt/live/your.server.name/privkey.pem # EDIT THIS
|
||||||
|
</VirtualHost>
|
||||||
|
```
|
||||||
|
|
||||||
|
**You need to make some edits again.** When you are done, run
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ sudo systemctl reload apache2
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Nginx
|
||||||
|
|
||||||
|
If you use Nginx and not Apache, add the following server section inside the
|
||||||
|
http section of `/etc/nginx/nginx.conf`
|
||||||
|
|
||||||
|
```
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
listen 8448 ssl http2;
|
||||||
|
listen [::]:8448 ssl http2;
|
||||||
|
server_name your.server.name; # EDIT THIS
|
||||||
|
merge_slashes off;
|
||||||
|
|
||||||
|
location /_matrix/ {
|
||||||
|
proxy_pass http://127.0.0.1:6167$request_uri;
|
||||||
|
proxy_set_header Host $http_host;
|
||||||
|
proxy_buffering off;
|
||||||
|
}
|
||||||
|
|
||||||
|
ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
|
||||||
|
ssl_certificate_key /etc/letsencrypt/live/your.server.name/privkey.pem; # EDIT THIS
|
||||||
|
ssl_trusted_certificate /etc/letsencrypt/live/your.server.name/chain.pem; # EDIT THIS
|
||||||
|
include /etc/letsencrypt/options-ssl-nginx.conf;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
**You need to make some edits again.** When you are done, run
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ sudo systemctl reload nginx
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## SSL Certificate
|
||||||
|
|
||||||
|
The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ sudo certbot -d your.server.name
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## You're done!
|
||||||
|
|
||||||
|
Now you can start Conduit with:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ sudo systemctl start conduit
|
||||||
|
```
|
||||||
|
|
||||||
|
Set it to start automatically when your system boots with:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ sudo systemctl enable conduit
|
||||||
|
```
|
||||||
|
|
||||||
|
If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md).
|
95
Dockerfile
Normal file
95
Dockerfile
Normal file
|
@ -0,0 +1,95 @@
|
||||||
|
# Using multistage build:
|
||||||
|
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||||
|
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||||
|
|
||||||
|
|
||||||
|
########################## BUILD IMAGE ##########################
|
||||||
|
# Alpine build image to build Conduit's statically compiled binary
|
||||||
|
FROM alpine:3.14 as builder
|
||||||
|
|
||||||
|
# Specifies if the local project is build or if Conduit gets build
|
||||||
|
# from the official git repository. Defaults to the git repo.
|
||||||
|
ARG LOCAL=false
|
||||||
|
# Specifies which revision/commit is build. Defaults to HEAD
|
||||||
|
ARG GIT_REF=origin/master
|
||||||
|
|
||||||
|
# Install packages needed for building all crates
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
cargo \
|
||||||
|
openssl-dev
|
||||||
|
|
||||||
|
|
||||||
|
# Copy project files from current folder
|
||||||
|
COPY . .
|
||||||
|
# Build it from the copied local files or from the official git repository
|
||||||
|
RUN if [[ $LOCAL == "true" ]]; then \
|
||||||
|
cargo install --path . ; \
|
||||||
|
else \
|
||||||
|
cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF}; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
########################## RUNTIME IMAGE ##########################
|
||||||
|
# Create new stage with a minimal image for the actual
|
||||||
|
# runtime image/container
|
||||||
|
FROM alpine:3.14
|
||||||
|
|
||||||
|
ARG CREATED
|
||||||
|
ARG VERSION
|
||||||
|
ARG GIT_REF=origin/master
|
||||||
|
|
||||||
|
ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
|
||||||
|
|
||||||
|
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
|
||||||
|
# including a custom label specifying the build command
|
||||||
|
LABEL org.opencontainers.image.created=${CREATED} \
|
||||||
|
org.opencontainers.image.authors="Conduit Contributors" \
|
||||||
|
org.opencontainers.image.title="Conduit" \
|
||||||
|
org.opencontainers.image.version=${VERSION} \
|
||||||
|
org.opencontainers.image.vendor="Conduit Contributors" \
|
||||||
|
org.opencontainers.image.description="A Matrix homeserver written in Rust" \
|
||||||
|
org.opencontainers.image.url="https://conduit.rs/" \
|
||||||
|
org.opencontainers.image.revision=${GIT_REF} \
|
||||||
|
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
|
||||||
|
org.opencontainers.image.licenses="Apache-2.0" \
|
||||||
|
org.opencontainers.image.documentation="" \
|
||||||
|
org.opencontainers.image.ref.name="" \
|
||||||
|
org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
|
||||||
|
maintainer="Weasy666"
|
||||||
|
|
||||||
|
# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose.
|
||||||
|
EXPOSE 6167
|
||||||
|
|
||||||
|
# Copy config files from context and the binary from
|
||||||
|
# the "builder" stage to the current stage into folder
|
||||||
|
# /srv/conduit and create data folder for database
|
||||||
|
RUN mkdir -p /srv/conduit/.local/share/conduit
|
||||||
|
COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/
|
||||||
|
|
||||||
|
# Add www-data user and group with UID 82, as used by alpine
|
||||||
|
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
|
||||||
|
RUN set -x ; \
|
||||||
|
addgroup -Sg 82 www-data 2>/dev/null ; \
|
||||||
|
adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \
|
||||||
|
addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1
|
||||||
|
|
||||||
|
# Change ownership of Conduit files to www-data user and group
|
||||||
|
RUN chown -cR www-data:www-data /srv/conduit
|
||||||
|
|
||||||
|
# Install packages needed to run Conduit
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
libgcc
|
||||||
|
|
||||||
|
# Test if Conduit is still alive, uses the same endpoint as Element
|
||||||
|
HEALTHCHECK --start-period=5s \
|
||||||
|
CMD curl --fail -s "http://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \
|
||||||
|
curl -k --fail -s "https://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \
|
||||||
|
exit 1
|
||||||
|
|
||||||
|
# Set user to www-data
|
||||||
|
USER www-data
|
||||||
|
# Set container home directory
|
||||||
|
WORKDIR /srv/conduit
|
||||||
|
# Run Conduit
|
||||||
|
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
140
README.md
140
README.md
|
@ -1,84 +1,110 @@
|
||||||
# Conduit
|
# Conduit
|
||||||
|
|
||||||
<!-- ANCHOR: catchphrase -->
|
|
||||||
### A Matrix homeserver written in Rust
|
### A Matrix homeserver written in Rust
|
||||||
<!-- ANCHOR_END: catchphrase -->
|
|
||||||
|
|
||||||
Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information.
|
|
||||||
Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository.
|
|
||||||
|
|
||||||
<!-- ANCHOR: body -->
|
|
||||||
#### What is Matrix?
|
|
||||||
|
|
||||||
[Matrix](https://matrix.org) is an open network for secure and decentralized
|
|
||||||
communication. Users from every Matrix homeserver can chat with users from all
|
|
||||||
other Matrix servers. You can even use bridges (also called Matrix appservices)
|
|
||||||
to communicate with users outside of Matrix, like a community on Discord.
|
|
||||||
|
|
||||||
#### What is the goal?
|
#### What is the goal?
|
||||||
|
|
||||||
An efficient Matrix homeserver that's easy to set up and just works. You can install
|
A fast Matrix homeserver that's easy to set up and just works. You can install
|
||||||
it on a mini-computer like the Raspberry Pi to host Matrix for your family,
|
it on a mini-computer like the Raspberry Pi to host Matrix for your family,
|
||||||
friends or company.
|
friends or company.
|
||||||
|
|
||||||
|
|
||||||
#### Can I try it out?
|
#### Can I try it out?
|
||||||
|
|
||||||
Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login.
|
Yes! Just open a Matrix client (<https://app.element.io> or Element Android for
|
||||||
|
example) and register on the `https://conduit.koesters.xyz` homeserver.
|
||||||
|
|
||||||
|
|
||||||
|
#### What is it built on?
|
||||||
|
|
||||||
|
- [Ruma](https://www.ruma.io): Useful structures for endpoint requests and
|
||||||
|
responses that can be (de)serialized
|
||||||
|
- [Sled](https://github.com/spacejam/sled): A simple (key, value) database with
|
||||||
|
good performance
|
||||||
|
- [Rocket](https://rocket.rs): A flexible web framework
|
||||||
|
|
||||||
Server hosting for conduit.rs is donated by the Matrix.org Foundation.
|
|
||||||
|
|
||||||
#### What is the current status?
|
#### What is the current status?
|
||||||
|
|
||||||
Conduit is Beta, meaning you can join and participate in most
|
Conduit can already be used chat with other users on Conduit, chat with users
|
||||||
Matrix rooms, but not all features are supported and you might run into bugs
|
from other Matrix servers and even to chat with users on other platforms using
|
||||||
from time to time.
|
appservices. When chatting with users on the same Conduit server, everything
|
||||||
|
should work assuming you use a compatible client.
|
||||||
|
|
||||||
|
**You should not join Matrix rooms without asking the admins first.** We do not
|
||||||
|
know whether Conduit is safe for general use yet, so you should assume there is
|
||||||
|
some chance that it breaks rooms permanently for all participating users. We
|
||||||
|
are not aware of such a bug today, but we would like to do more testing.
|
||||||
|
|
||||||
There are still a few important features missing:
|
There are still a few important features missing:
|
||||||
|
|
||||||
- E2EE emoji comparison over federation (E2EE chat works)
|
- Database stability (currently you might have to do manual upgrades or even wipe the db for new versions)
|
||||||
- Outgoing read receipts, typing, presence over federation (incoming works)
|
- Edge cases for end-to-end encryption over federation
|
||||||
<!-- ANCHOR_END: body -->
|
- Typing and presence over federation
|
||||||
|
- Lots of testing
|
||||||
|
|
||||||
|
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
|
||||||
|
|
||||||
|
|
||||||
|
#### How can I deploy my own?
|
||||||
|
|
||||||
|
##### Deploy
|
||||||
|
|
||||||
|
Download or compile a Conduit binary, set up the config and call it from somewhere like a systemd script. [Read
|
||||||
|
more](DEPLOY.md)
|
||||||
|
|
||||||
|
If you want to connect an Appservice to Conduit, take a look at the [Appservice Guide](APPSERVICES.md).
|
||||||
|
|
||||||
|
##### Deploy using a Debian package
|
||||||
|
|
||||||
|
You need to have the `deb` helper command installed that creates Debian packages from Cargo projects (see [cargo-deb](https://github.com/mmstick/cargo-deb/) for more info):
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ cargo install cargo-deb
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, you can create and install a Debian package at a whim:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ cargo deb
|
||||||
|
$ dpkg -i target/debian/matrix-conduit_0.1.0_amd64.deb
|
||||||
|
```
|
||||||
|
|
||||||
|
This will build, package, install, configure and start Conduit. [Read more](debian/README.Debian).
|
||||||
|
|
||||||
|
Note that `cargo deb` supports [cross-compilation](https://github.com/mmstick/cargo-deb/#cross-compilation) too!
|
||||||
|
Official Debian packages will follow once Conduit starts to have stable releases.
|
||||||
|
|
||||||
|
##### Deploy using Docker
|
||||||
|
|
||||||
|
Pull and run the docker image with
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
docker pull matrixconduit/matrix-conduit:latest
|
||||||
|
docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
> <b>Note:</b> You also need to supply a `conduit.toml` config file, you can find an example [here](./conduit-example.toml).
|
||||||
|
> Or you can pass in `-e CONDUIT_CONFIG=""` and configure Conduit purely with env vars.
|
||||||
|
|
||||||
|
Or build and run it with docker or docker-compose. [Read more](docker/README.md)
|
||||||
|
|
||||||
|
|
||||||
<!-- ANCHOR: footer -->
|
|
||||||
#### How can I contribute?
|
#### How can I contribute?
|
||||||
|
|
||||||
1. Look for an issue you would like to work on and make sure no one else is currently working on it.
|
1. Look for an issue you would like to work on and make sure it's not assigned
|
||||||
2. Tell us that you are working on the issue (comment on the issue or chat in
|
to other users
|
||||||
[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions.
|
2. Ask someone to assign the issue to you (comment on the issue or chat in
|
||||||
3. Fork the repo, create a new branch and push commits.
|
#conduit:nordgedanken.dev)
|
||||||
|
3. Fork the repo and work on the issue. #conduit:nordgedanken.dev is happy to help :)
|
||||||
4. Submit a MR
|
4. Submit a MR
|
||||||
|
|
||||||
#### Contact
|
|
||||||
|
|
||||||
If you have any questions, feel free to
|
|
||||||
- Ask in `#conduit:fachschaften.org` on Matrix
|
|
||||||
- Write an E-Mail to `conduit@koesters.xyz`
|
|
||||||
- Send an direct message to `@timokoesters:fachschaften.org` on Matrix
|
|
||||||
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
|
|
||||||
|
|
||||||
#### Security
|
|
||||||
|
|
||||||
If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs)
|
|
||||||
and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to
|
|
||||||
[conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before
|
|
||||||
a fix is released publically.
|
|
||||||
|
|
||||||
#### Thanks to
|
|
||||||
|
|
||||||
Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
|
|
||||||
|
|
||||||
Thanks to the contributors to Conduit and all libraries we use, for example:
|
|
||||||
|
|
||||||
- Ruma: A clean library for the Matrix Spec in Rust
|
|
||||||
- axum: A modular web framework
|
|
||||||
|
|
||||||
#### Donate
|
#### Donate
|
||||||
|
|
||||||
- Liberapay: <https://liberapay.com/timokoesters/>
|
Liberapay: <https://liberapay.com/timokoesters/>\
|
||||||
- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
|
Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
|
||||||
|
|
||||||
|
|
||||||
#### Logo
|
#### Logo
|
||||||
|
|
||||||
- Lightning Bolt Logo: <https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg>
|
Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \
|
||||||
- Logo License: <https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md>
|
Logo License: https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md
|
||||||
<!-- ANCHOR_END: footer -->
|
|
||||||
|
|
|
@ -1,37 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
# Path to Complement's source code
|
|
||||||
COMPLEMENT_SRC="$1"
|
|
||||||
|
|
||||||
# A `.jsonl` file to write test logs to
|
|
||||||
LOG_FILE="$2"
|
|
||||||
|
|
||||||
# A `.jsonl` file to write test results to
|
|
||||||
RESULTS_FILE="$3"
|
|
||||||
|
|
||||||
OCI_IMAGE="complement-conduit:dev"
|
|
||||||
|
|
||||||
env \
|
|
||||||
-C "$(git rev-parse --show-toplevel)" \
|
|
||||||
docker build \
|
|
||||||
--tag "$OCI_IMAGE" \
|
|
||||||
--file complement/Dockerfile \
|
|
||||||
.
|
|
||||||
|
|
||||||
# It's okay (likely, even) that `go test` exits nonzero
|
|
||||||
set +o pipefail
|
|
||||||
env \
|
|
||||||
-C "$COMPLEMENT_SRC" \
|
|
||||||
COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
|
|
||||||
go test -json ./tests | tee "$LOG_FILE"
|
|
||||||
set -o pipefail
|
|
||||||
|
|
||||||
# Post-process the results into an easy-to-compare format
|
|
||||||
cat "$LOG_FILE" | jq -c '
|
|
||||||
select(
|
|
||||||
(.Action == "pass" or .Action == "fail" or .Action == "skip")
|
|
||||||
and .Test != null
|
|
||||||
) | {Action: .Action, Test: .Test}
|
|
||||||
' | sort > "$RESULTS_FILE"
|
|
|
@ -1,40 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
# Build the installable and forward any other arguments too. Also, use
|
|
||||||
# nix-output-monitor instead if it's available.
|
|
||||||
if command -v nom &> /dev/null; then
|
|
||||||
nom build "$@"
|
|
||||||
else
|
|
||||||
nix build "$@"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ ! -z ${ATTIC_TOKEN+x} ]; then
|
|
||||||
nix run --inputs-from . attic -- \
|
|
||||||
login \
|
|
||||||
conduit \
|
|
||||||
"${ATTIC_ENDPOINT:-https://attic.conduit.rs/conduit}" \
|
|
||||||
"$ATTIC_TOKEN"
|
|
||||||
|
|
||||||
readarray -t derivations < <(nix path-info "$@" --derivation)
|
|
||||||
for derivation in "${derivations[@]}"; do
|
|
||||||
cache+=(
|
|
||||||
"$(nix-store --query --requisites --include-outputs "$derivation")"
|
|
||||||
)
|
|
||||||
done
|
|
||||||
|
|
||||||
# Upload them to Attic
|
|
||||||
#
|
|
||||||
# Use `xargs` and a here-string because something would probably explode if
|
|
||||||
# several thousand arguments got passed to a command at once. Hopefully no
|
|
||||||
# store paths include a newline in them.
|
|
||||||
(
|
|
||||||
IFS=$'\n'
|
|
||||||
nix shell --inputs-from . attic -c xargs \
|
|
||||||
attic push conduit <<< "${cache[*]}"
|
|
||||||
)
|
|
||||||
|
|
||||||
else
|
|
||||||
echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
|
|
||||||
fi
|
|
21
book.toml
21
book.toml
|
@ -1,21 +0,0 @@
|
||||||
[book]
|
|
||||||
description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol"
|
|
||||||
language = "en"
|
|
||||||
multilingual = false
|
|
||||||
src = "docs"
|
|
||||||
title = "Conduit"
|
|
||||||
|
|
||||||
[build]
|
|
||||||
build-dir = "public"
|
|
||||||
create-missing = true
|
|
||||||
|
|
||||||
[output.html]
|
|
||||||
edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}"
|
|
||||||
git-repository-icon = "fa-git-square"
|
|
||||||
git-repository-url = "https://gitlab.com/famedly/conduit"
|
|
||||||
|
|
||||||
[output.html.search]
|
|
||||||
limit-results = 15
|
|
||||||
|
|
||||||
[output.html.code.hidelines]
|
|
||||||
json = "~"
|
|
|
@ -1,45 +0,0 @@
|
||||||
FROM rust:1.79.0
|
|
||||||
|
|
||||||
WORKDIR /workdir
|
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
|
||||||
libclang-dev
|
|
||||||
|
|
||||||
COPY Cargo.toml Cargo.toml
|
|
||||||
COPY Cargo.lock Cargo.lock
|
|
||||||
COPY src src
|
|
||||||
RUN cargo build --release \
|
|
||||||
&& mv target/release/conduit conduit \
|
|
||||||
&& rm -rf target
|
|
||||||
|
|
||||||
# Install caddy
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
debian-keyring \
|
|
||||||
debian-archive-keyring \
|
|
||||||
apt-transport-https \
|
|
||||||
curl \
|
|
||||||
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
|
|
||||||
| gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
|
|
||||||
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
|
|
||||||
| tee /etc/apt/sources.list.d/caddy-testing.list \
|
|
||||||
&& apt-get update \
|
|
||||||
&& apt-get install -y caddy
|
|
||||||
|
|
||||||
COPY conduit-example.toml conduit.toml
|
|
||||||
COPY complement/caddy.json caddy.json
|
|
||||||
|
|
||||||
ENV SERVER_NAME=localhost
|
|
||||||
ENV CONDUIT_CONFIG=/workdir/conduit.toml
|
|
||||||
|
|
||||||
RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
|
|
||||||
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
|
|
||||||
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
|
|
||||||
|
|
||||||
EXPOSE 8008 8448
|
|
||||||
|
|
||||||
CMD uname -a && \
|
|
||||||
sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \
|
|
||||||
sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \
|
|
||||||
caddy start --config caddy.json > /dev/null && \
|
|
||||||
/workdir/conduit
|
|
|
@ -1,11 +0,0 @@
|
||||||
# Complement
|
|
||||||
|
|
||||||
## What's that?
|
|
||||||
|
|
||||||
Have a look at [its repository](https://github.com/matrix-org/complement).
|
|
||||||
|
|
||||||
## How do I use it with Conduit?
|
|
||||||
|
|
||||||
The script at [`../bin/complement`](../bin/complement) has automation for this.
|
|
||||||
It takes a few command line arguments, you can read the script to find out what
|
|
||||||
those are.
|
|
|
@ -1,72 +0,0 @@
|
||||||
{
|
|
||||||
"logging": {
|
|
||||||
"logs": {
|
|
||||||
"default": {
|
|
||||||
"level": "WARN"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"apps": {
|
|
||||||
"http": {
|
|
||||||
"https_port": 8448,
|
|
||||||
"servers": {
|
|
||||||
"srv0": {
|
|
||||||
"listen": [":8448"],
|
|
||||||
"routes": [{
|
|
||||||
"match": [{
|
|
||||||
"host": ["your.server.name"]
|
|
||||||
}],
|
|
||||||
"handle": [{
|
|
||||||
"handler": "subroute",
|
|
||||||
"routes": [{
|
|
||||||
"handle": [{
|
|
||||||
"handler": "reverse_proxy",
|
|
||||||
"upstreams": [{
|
|
||||||
"dial": "127.0.0.1:8008"
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
}],
|
|
||||||
"terminal": true
|
|
||||||
}],
|
|
||||||
"tls_connection_policies": [{
|
|
||||||
"match": {
|
|
||||||
"sni": ["your.server.name"]
|
|
||||||
}
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"pki": {
|
|
||||||
"certificate_authorities": {
|
|
||||||
"local": {
|
|
||||||
"name": "Complement CA",
|
|
||||||
"root": {
|
|
||||||
"certificate": "/complement/ca/ca.crt",
|
|
||||||
"private_key": "/complement/ca/ca.key"
|
|
||||||
},
|
|
||||||
"intermediate": {
|
|
||||||
"certificate": "/complement/ca/ca.crt",
|
|
||||||
"private_key": "/complement/ca/ca.key"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"tls": {
|
|
||||||
"automation": {
|
|
||||||
"policies": [{
|
|
||||||
"subjects": ["your.server.name"],
|
|
||||||
"issuers": [{
|
|
||||||
"module": "internal"
|
|
||||||
}],
|
|
||||||
"on_demand": true
|
|
||||||
}, {
|
|
||||||
"issuers": [{
|
|
||||||
"module": "internal",
|
|
||||||
"ca": "local"
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,74 +1,47 @@
|
||||||
# =============================================================================
|
|
||||||
# This is the official example config for Conduit.
|
|
||||||
# If you use it for your server, you will need to adjust it to your own needs.
|
|
||||||
# At the very least, change the server_name field!
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
|
|
||||||
[global]
|
[global]
|
||||||
# The server_name is the pretty name of this server. It is used as a suffix for user
|
# The server_name is the name of this server. It is used as a suffix for user
|
||||||
# and room ids. Examples: matrix.org, conduit.rs
|
# and room ids. Examples: matrix.org, conduit.rs
|
||||||
|
# The Conduit server needs to be reachable at https://your.server.name/ on port
|
||||||
# The Conduit server needs all /_matrix/ requests to be reachable at
|
# 443 (client-server) and 8448 (federation) OR you can create /.well-known
|
||||||
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
|
# files to redirect requests. See
|
||||||
|
|
||||||
# If that's not possible for you, you can create /.well-known files to redirect
|
|
||||||
# requests. See
|
|
||||||
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
||||||
# and
|
# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
||||||
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
# for more information
|
||||||
# for more information, or continue below to see how conduit can do this for you.
|
|
||||||
|
|
||||||
# YOU NEED TO EDIT THIS
|
# YOU NEED TO EDIT THIS
|
||||||
#server_name = "your.server.name"
|
#server_name = "your.server.name"
|
||||||
|
|
||||||
database_backend = "rocksdb"
|
|
||||||
# This is the only directory where Conduit will save its data
|
# This is the only directory where Conduit will save its data
|
||||||
database_path = "/var/lib/matrix-conduit/"
|
database_path = "/var/lib/conduit/"
|
||||||
|
|
||||||
# The port Conduit will be running on. You need to set up a reverse proxy in
|
# The port Conduit will be running on. You need to set up a reverse proxy in
|
||||||
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
||||||
# 443 and 8448 will be forwarded to the Conduit instance running on this port
|
# 443 and 8448 will be forwarded to the Conduit instance running on this port
|
||||||
# Docker users: Don't change this, you'll need to map an external port to this.
|
|
||||||
port = 6167
|
port = 6167
|
||||||
|
|
||||||
# Max size for uploads
|
# Max size for uploads
|
||||||
max_request_size = 20_000_000 # in bytes
|
max_request_size = 20_000_000 # in bytes
|
||||||
|
|
||||||
# Enables registration. If set to false, no users can register on this server.
|
# Disable registration. No new users will be able to register on this server
|
||||||
allow_registration = true
|
#allow_registration = false
|
||||||
|
|
||||||
# A static registration token that new users will have to provide when creating
|
# Disable encryption, so no new encrypted rooms can be created
|
||||||
# an account. YOU NEED TO EDIT THIS.
|
# Note: existing rooms will continue to work
|
||||||
# - Insert a password that users will have to enter on registration
|
#allow_encryption = false
|
||||||
# - Start the line with '#' to remove the condition
|
#allow_federation = false
|
||||||
registration_token = ""
|
|
||||||
|
|
||||||
allow_check_for_updates = true
|
# Enable jaeger to support monitoring and troubleshooting through jaeger
|
||||||
allow_federation = true
|
#allow_jaeger = false
|
||||||
|
|
||||||
# Enable the display name lightning bolt on registration.
|
|
||||||
enable_lightning_bolt = true
|
|
||||||
|
|
||||||
# Servers listed here will be used to gather public keys of other servers.
|
|
||||||
# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
|
|
||||||
# support batched key requests, so this list should only contain Synapse
|
|
||||||
# servers.)
|
|
||||||
trusted_servers = ["matrix.org"]
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
|
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
|
||||||
# Controls the log verbosity. See also [here][0].
|
#workers = 4 # default: cpu core count * 2
|
||||||
#
|
|
||||||
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
|
|
||||||
#log = "..."
|
|
||||||
|
|
||||||
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
||||||
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
|
||||||
|
|
||||||
[global.well_known]
|
proxy = "none" # more examples can be found at src/database/proxy.rs:6
|
||||||
# Conduit handles the /.well-known/matrix/* endpoints, making both clients and servers try to access conduit with the host
|
|
||||||
# server_name and port 443 by default.
|
# The total amount of memory that the database will use.
|
||||||
# If you want to override these defaults, uncomment and edit the following lines accordingly:
|
#db_cache_capacity_mb = 200
|
||||||
#server = your.server.name:443
|
|
||||||
#client = https://your.server.name
|
|
||||||
|
|
18
debian/README.md → debian/README.Debian
vendored
18
debian/README.md → debian/README.Debian
vendored
|
@ -1,36 +1,28 @@
|
||||||
Conduit for Debian
|
Conduit for Debian
|
||||||
==================
|
==================
|
||||||
|
|
||||||
Installation
|
|
||||||
------------
|
|
||||||
|
|
||||||
Information about downloading, building and deploying the Debian package, see
|
|
||||||
the "Installing Conduit" section in the Deploying docs.
|
|
||||||
All following sections until "Setting up the Reverse Proxy" be ignored because
|
|
||||||
this is handled automatically by the packaging.
|
|
||||||
|
|
||||||
Configuration
|
Configuration
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
When installed, Debconf generates the configuration of the homeserver
|
When installed, Debconf generates the configuration of the homeserver
|
||||||
(host)name, the address and port it listens on. This configuration ends up in
|
(host)name, the address and port it listens on. This configuration ends up in
|
||||||
`/etc/matrix-conduit/conduit.toml`.
|
/etc/matrix-conduit/conduit.toml.
|
||||||
|
|
||||||
You can tweak more detailed settings by uncommenting and setting the variables
|
You can tweak more detailed settings by uncommenting and setting the variables
|
||||||
in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum
|
in /etc/matrix-conduit/conduit.toml. This involves settings such as the maximum
|
||||||
file size for download/upload, enabling federation, etc.
|
file size for download/upload, enabling federation, etc.
|
||||||
|
|
||||||
Running
|
Running
|
||||||
-------
|
-------
|
||||||
|
|
||||||
The package uses the `matrix-conduit.service` systemd unit file to start and
|
The package uses the matrix-conduit.service systemd unit file to start and
|
||||||
stop Conduit. It loads the configuration file mentioned above to set up the
|
stop Conduit. It loads the configuration file mentioned above to set up the
|
||||||
environment before running the server.
|
environment before running the server.
|
||||||
|
|
||||||
This package assumes by default that Conduit will be placed behind a reverse
|
This package assumes by default that Conduit will be placed behind a reverse
|
||||||
proxy such as Apache or nginx. This default deployment entails just listening
|
proxy such as Apache or nginx. This default deployment entails just listening
|
||||||
on `127.0.0.1` and the free port `6167` and is reachable via a client using the URL
|
on 127.0.0.1 and the free port 6167 and is reachable via a client using the URL
|
||||||
<http://localhost:6167>.
|
http://localhost:6167.
|
||||||
|
|
||||||
At a later stage this packaging may support also setting up TLS and running
|
At a later stage this packaging may support also setting up TLS and running
|
||||||
stand-alone. In this case, however, you need to set up some certificates and
|
stand-alone. In this case, however, you need to set up some certificates and
|
1
debian/matrix-conduit.service
vendored
1
debian/matrix-conduit.service
vendored
|
@ -3,7 +3,6 @@ Description=Conduit Matrix homeserver
|
||||||
After=network.target
|
After=network.target
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
DynamicUser=yes
|
|
||||||
User=_matrix-conduit
|
User=_matrix-conduit
|
||||||
Group=_matrix-conduit
|
Group=_matrix-conduit
|
||||||
Type=simple
|
Type=simple
|
||||||
|
|
65
debian/postinst
vendored
65
debian/postinst
vendored
|
@ -5,7 +5,7 @@ set -e
|
||||||
|
|
||||||
CONDUIT_CONFIG_PATH=/etc/matrix-conduit
|
CONDUIT_CONFIG_PATH=/etc/matrix-conduit
|
||||||
CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml"
|
CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml"
|
||||||
CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/
|
CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/conduit_db
|
||||||
|
|
||||||
case "$1" in
|
case "$1" in
|
||||||
configure)
|
configure)
|
||||||
|
@ -19,11 +19,11 @@ case "$1" in
|
||||||
_matrix-conduit
|
_matrix-conduit
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create the database path if it does not exist yet and fix up ownership
|
# Create the database path if it does not exist yet.
|
||||||
# and permissions.
|
if [ ! -d "$CONDUIT_DATABASE_PATH" ]; then
|
||||||
mkdir -p "$CONDUIT_DATABASE_PATH"
|
mkdir -p "$CONDUIT_DATABASE_PATH"
|
||||||
chown _matrix-conduit "$CONDUIT_DATABASE_PATH"
|
chown _matrix-conduit "$CONDUIT_DATABASE_PATH"
|
||||||
chmod 700 "$CONDUIT_DATABASE_PATH"
|
fi
|
||||||
|
|
||||||
if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then
|
if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then
|
||||||
# Write the debconf values in the config.
|
# Write the debconf values in the config.
|
||||||
|
@ -36,24 +36,18 @@ case "$1" in
|
||||||
mkdir -p "$CONDUIT_CONFIG_PATH"
|
mkdir -p "$CONDUIT_CONFIG_PATH"
|
||||||
cat > "$CONDUIT_CONFIG_FILE" << EOF
|
cat > "$CONDUIT_CONFIG_FILE" << EOF
|
||||||
[global]
|
[global]
|
||||||
# The server_name is the pretty name of this server. It is used as a suffix for
|
# The server_name is the name of this server. It is used as a suffix for user
|
||||||
# user and room ids. Examples: matrix.org, conduit.rs
|
# and room ids. Examples: matrix.org, conduit.rs
|
||||||
|
# The Conduit server needs to be reachable at https://your.server.name/ on port
|
||||||
# The Conduit server needs all /_matrix/ requests to be reachable at
|
# 443 (client-server) and 8448 (federation) OR you can create /.well-known
|
||||||
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
|
# files to redirect requests. See
|
||||||
|
|
||||||
# If that's not possible for you, you can create /.well-known files to redirect
|
|
||||||
# requests. See
|
|
||||||
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
||||||
# and
|
# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
||||||
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
# for more information.
|
||||||
# for more information
|
|
||||||
|
|
||||||
server_name = "${CONDUIT_SERVER_NAME}"
|
server_name = "${CONDUIT_SERVER_NAME}"
|
||||||
|
|
||||||
# This is the only directory where Conduit will save its data.
|
# This is the only directory where Conduit will save its data.
|
||||||
database_path = "${CONDUIT_DATABASE_PATH}"
|
database_path = "${CONDUIT_DATABASE_PATH}"
|
||||||
database_backend = "rocksdb"
|
|
||||||
|
|
||||||
# The address Conduit will be listening on.
|
# The address Conduit will be listening on.
|
||||||
# By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to
|
# By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to
|
||||||
|
@ -62,40 +56,29 @@ address = "${CONDUIT_ADDRESS}"
|
||||||
|
|
||||||
# The port Conduit will be running on. You need to set up a reverse proxy in
|
# The port Conduit will be running on. You need to set up a reverse proxy in
|
||||||
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
||||||
# 443 and 8448 will be forwarded to the Conduit instance running on this port
|
# 443 and 8448 will be forwarded to the Conduit instance running on this port.
|
||||||
# Docker users: Don't change this, you'll need to map an external port to this.
|
|
||||||
port = ${CONDUIT_PORT}
|
port = ${CONDUIT_PORT}
|
||||||
|
|
||||||
# Max size for uploads
|
# Max size for uploads
|
||||||
max_request_size = 20_000_000 # in bytes
|
max_request_size = 20_000_000 # in bytes
|
||||||
|
|
||||||
# Enables registration. If set to false, no users can register on this server.
|
# Disable registration. No new users will be able to register on this server.
|
||||||
allow_registration = true
|
#allow_registration = false
|
||||||
|
|
||||||
# A static registration token that new users will have to provide when creating
|
# Disable encryption, so no new encrypted rooms can be created.
|
||||||
# an account.
|
# Note: Existing rooms will continue to work.
|
||||||
# - Insert a password that users will have to enter on registration
|
#allow_encryption = false
|
||||||
# - Start the line with '#' to remove the condition
|
#allow_federation = false
|
||||||
#registration_token = ""
|
|
||||||
|
|
||||||
allow_federation = true
|
# Enable jaeger to support monitoring and troubleshooting through jaeger.
|
||||||
allow_check_for_updates = true
|
#allow_jaeger = false
|
||||||
|
|
||||||
# Enable the display name lightning bolt on registration.
|
|
||||||
enable_lightning_bolt = true
|
|
||||||
|
|
||||||
# Servers listed here will be used to gather public keys of other servers.
|
|
||||||
# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
|
|
||||||
# support batched key requests, so this list should only contain Synapse
|
|
||||||
# servers.)
|
|
||||||
trusted_servers = ["matrix.org"]
|
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
|
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
|
||||||
|
#workers = 4 # default: cpu core count * 2
|
||||||
|
|
||||||
# Controls the log verbosity. See also [here][0].
|
# The total amount of memory that the database will use.
|
||||||
#
|
#db_cache_capacity_mb = 200
|
||||||
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
|
|
||||||
#log = "..."
|
|
||||||
EOF
|
EOF
|
||||||
fi
|
fi
|
||||||
;;
|
;;
|
||||||
|
|
10
default.nix
10
default.nix
|
@ -1,10 +0,0 @@
|
||||||
(import
|
|
||||||
(
|
|
||||||
let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
|
|
||||||
fetchTarball {
|
|
||||||
url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
|
|
||||||
sha256 = lock.nodes.flake-compat.locked.narHash;
|
|
||||||
}
|
|
||||||
)
|
|
||||||
{ src = ./.; }
|
|
||||||
).defaultNix
|
|
|
@ -7,8 +7,8 @@ services:
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: matrixconduit/matrix-conduit:latest
|
image: matrixconduit/matrix-conduit:latest
|
||||||
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
|
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
|
||||||
### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
|
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
|
||||||
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
|
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
|
||||||
# build:
|
# build:
|
||||||
# context: .
|
# context: .
|
||||||
# args:
|
# args:
|
||||||
|
@ -20,21 +20,27 @@ services:
|
||||||
ports:
|
ports:
|
||||||
- 8448:6167
|
- 8448:6167
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/matrix-conduit/
|
- db:/srv/conduit/.local/share/conduit
|
||||||
|
### Uncomment if you want to use conduit.toml to configure Conduit
|
||||||
|
### Note: Set env vars will override conduit.toml values
|
||||||
|
# - ./conduit.toml:/srv/conduit/conduit.toml
|
||||||
environment:
|
environment:
|
||||||
CONDUIT_SERVER_NAME: your.server.name # EDIT THIS
|
CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name
|
||||||
CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
|
|
||||||
CONDUIT_DATABASE_BACKEND: rocksdb
|
|
||||||
CONDUIT_PORT: 6167
|
|
||||||
CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
|
|
||||||
CONDUIT_ALLOW_REGISTRATION: 'true'
|
|
||||||
CONDUIT_ALLOW_FEDERATION: 'true'
|
|
||||||
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
|
||||||
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
### Uncomment and change values as desired
|
||||||
CONDUIT_ADDRESS: 0.0.0.0
|
# CONDUIT_ADDRESS: 0.0.0.0
|
||||||
CONDUIT_CONFIG: '' # Ignore this
|
# CONDUIT_PORT: 6167
|
||||||
#
|
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
||||||
|
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
||||||
|
# CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off"
|
||||||
|
# CONDUIT_ALLOW_JAEGER: 'false'
|
||||||
|
# CONDUIT_ALLOW_REGISTRATION : 'false'
|
||||||
|
# CONDUIT_ALLOW_ENCRYPTION: 'false'
|
||||||
|
# CONDUIT_ALLOW_FEDERATION: 'false'
|
||||||
|
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
||||||
|
# CONDUIT_WORKERS: 10
|
||||||
|
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
||||||
|
|
||||||
### Uncomment if you want to use your own Element-Web App.
|
### Uncomment if you want to use your own Element-Web App.
|
||||||
### Note: You need to provide a config.json for Element and you also need a second
|
### Note: You need to provide a config.json for Element and you also need a second
|
||||||
### Domain or Subdomain for the communication between Element and Conduit
|
### Domain or Subdomain for the communication between Element and Conduit
|
74
docker/README.md
Normal file
74
docker/README.md
Normal file
|
@ -0,0 +1,74 @@
|
||||||
|
# Deploy using Docker
|
||||||
|
|
||||||
|
> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.
|
||||||
|
|
||||||
|
|
||||||
|
## Docker
|
||||||
|
|
||||||
|
### Build & Dockerfile
|
||||||
|
|
||||||
|
The Dockerfile provided by Conduit has two stages, each of which creates an image.
|
||||||
|
1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository.
|
||||||
|
2. **Runtime:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions.
|
||||||
|
|
||||||
|
The Dockerfile includes a few build arguments that should be supplied when building it.
|
||||||
|
|
||||||
|
``` Dockerfile
|
||||||
|
ARG LOCAL=false
|
||||||
|
ARG CREATED
|
||||||
|
ARG VERSION
|
||||||
|
ARG GIT_REF=origin/master
|
||||||
|
```
|
||||||
|
|
||||||
|
- **CREATED:** Date and time as string (date-time as defined by RFC 3339). Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.created`. Supply by it like this `$(date -u +'%Y-%m-%dT%H:%M:%SZ')`
|
||||||
|
- **VERSION:** The SemVer version of Conduit, which is in the image. Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.version`. If you have a `Cargo.toml` in your build context, you can get it with `$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)`
|
||||||
|
- **LOCAL:** *(Optional)* A boolean value, specifies if the local build context should be used, or if the official repository will be cloned. If not supplied with the build command, it will default to `false`.
|
||||||
|
- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. If not supplied with the build command, it will default to `origin/master`.
|
||||||
|
|
||||||
|
To build the image you can use the following command
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
|
||||||
|
```
|
||||||
|
|
||||||
|
which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
|
||||||
|
**Note:** it ommits the two optional `build-arg`s.
|
||||||
|
|
||||||
|
|
||||||
|
### Run
|
||||||
|
|
||||||
|
After building the image you can simply run it with
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
For detached mode, you also need to use the `-d` flag. You also need to supply a `conduit.toml` config file, you can find an example [here](../conduit-example.toml).
|
||||||
|
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
|
||||||
|
too pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.
|
||||||
|
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
|
||||||
|
|
||||||
|
|
||||||
|
## Docker-compose
|
||||||
|
|
||||||
|
If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) including [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy.
|
||||||
|
|
||||||
|
|
||||||
|
### Build
|
||||||
|
|
||||||
|
To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with:
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. For possible `build-args`, please take a look at the above `Build & Dockerfile` section.
|
||||||
|
|
||||||
|
|
||||||
|
### Run
|
||||||
|
|
||||||
|
If you already have built the image, you can just start the container and everything else in the compose file in detached mode with:
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
docker-compose up -d
|
||||||
|
```
|
|
@ -1,38 +1,20 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
# ---------------------------------------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------------------------------------
|
||||||
# This Dockerfile is intended to be built as part of Conduit's CI pipeline.
|
# This Dockerfile is intended to be built as part of Conduit's CI pipeline.
|
||||||
# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs.
|
# It does not build Conduit in Docker, but just copies the matching build artifact from the build job.
|
||||||
|
# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary.
|
||||||
#
|
#
|
||||||
# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
|
# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
|
||||||
# Credit's for the original Dockerfile: Weasy666.
|
# Credit's for the original Dockerfile: Weasy666.
|
||||||
# ---------------------------------------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
FROM docker.io/alpine:3.16.0@sha256:4ff3ca91275773af45cb4b0834e12b7eb47d1c18f770a0b151381cd227f4c253 AS runner
|
FROM alpine:3.14
|
||||||
|
|
||||||
|
|
||||||
# Standard port on which Conduit launches.
|
|
||||||
# You still need to map the port when using the docker command or docker-compose.
|
|
||||||
EXPOSE 6167
|
|
||||||
|
|
||||||
# Users are expected to mount a volume to this directory:
|
|
||||||
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
|
|
||||||
|
|
||||||
ENV CONDUIT_PORT=6167 \
|
|
||||||
CONDUIT_ADDRESS="0.0.0.0" \
|
|
||||||
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
|
|
||||||
CONDUIT_CONFIG=''
|
|
||||||
# └─> Set no config file to do all configuration with env vars
|
|
||||||
|
|
||||||
# Conduit needs:
|
|
||||||
# ca-certificates: for https
|
|
||||||
# iproute2: for `ss` for the healthcheck script
|
|
||||||
RUN apk add --no-cache \
|
|
||||||
ca-certificates \
|
|
||||||
iproute2
|
|
||||||
|
|
||||||
ARG CREATED
|
ARG CREATED
|
||||||
ARG VERSION
|
ARG VERSION
|
||||||
ARG GIT_REF
|
ARG GIT_REF
|
||||||
|
|
||||||
|
ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
|
||||||
|
|
||||||
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
|
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
|
||||||
# including a custom label specifying the build command
|
# including a custom label specifying the build command
|
||||||
LABEL org.opencontainers.image.created=${CREATED} \
|
LABEL org.opencontainers.image.created=${CREATED} \
|
||||||
|
@ -45,40 +27,44 @@ LABEL org.opencontainers.image.created=${CREATED} \
|
||||||
org.opencontainers.image.revision=${GIT_REF} \
|
org.opencontainers.image.revision=${GIT_REF} \
|
||||||
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
|
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
|
||||||
org.opencontainers.image.licenses="Apache-2.0" \
|
org.opencontainers.image.licenses="Apache-2.0" \
|
||||||
org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
|
org.opencontainers.image.documentation="" \
|
||||||
org.opencontainers.image.ref.name=""
|
org.opencontainers.image.ref.name=""
|
||||||
|
|
||||||
|
# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose.
|
||||||
|
EXPOSE 6167
|
||||||
|
|
||||||
|
# create data folder for database
|
||||||
|
RUN mkdir -p /srv/conduit/.local/share/conduit
|
||||||
|
|
||||||
|
# Add www-data user and group with UID 82, as used by alpine
|
||||||
|
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
|
||||||
|
RUN set -x ; \
|
||||||
|
addgroup -Sg 82 www-data 2>/dev/null ; \
|
||||||
|
adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \
|
||||||
|
addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1
|
||||||
|
|
||||||
|
# Change ownership of Conduit files to www-data user and group
|
||||||
|
RUN chown -cR www-data:www-data /srv/conduit
|
||||||
|
|
||||||
|
# Install packages needed to run Conduit
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
libgcc
|
||||||
|
|
||||||
# Test if Conduit is still alive, uses the same endpoint as Element
|
# Test if Conduit is still alive, uses the same endpoint as Element
|
||||||
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
|
HEALTHCHECK --start-period=5s \
|
||||||
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
|
CMD curl --fail -s "http://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \
|
||||||
|
curl -k --fail -s "https://localhost:$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')/_matrix/client/versions" || \
|
||||||
|
exit 1
|
||||||
|
|
||||||
# Improve security: Don't run stuff as root, that does not need to run as root:
|
# Set user to www-data
|
||||||
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
|
USER www-data
|
||||||
ARG USER_ID=1000
|
|
||||||
ARG GROUP_ID=1000
|
|
||||||
RUN set -x ; \
|
|
||||||
deluser --remove-home www-data ; \
|
|
||||||
addgroup -S -g ${GROUP_ID} conduit 2>/dev/null ; \
|
|
||||||
adduser -S -u ${USER_ID} -D -H -h /srv/conduit -G conduit -g conduit conduit 2>/dev/null ; \
|
|
||||||
addgroup conduit conduit 2>/dev/null && exit 0 ; exit 1
|
|
||||||
|
|
||||||
# Change ownership of Conduit files to conduit user and group
|
|
||||||
RUN chown -cR conduit:conduit /srv/conduit && \
|
|
||||||
chmod +x /srv/conduit/healthcheck.sh && \
|
|
||||||
mkdir -p ${DEFAULT_DB_PATH} && \
|
|
||||||
chown -cR conduit:conduit ${DEFAULT_DB_PATH}
|
|
||||||
|
|
||||||
# Change user to conduit
|
|
||||||
USER conduit
|
|
||||||
# Set container home directory
|
# Set container home directory
|
||||||
WORKDIR /srv/conduit
|
WORKDIR /srv/conduit
|
||||||
|
# Run Conduit
|
||||||
# Run Conduit and print backtraces on panics
|
|
||||||
ENV RUST_BACKTRACE=1
|
|
||||||
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
||||||
|
|
||||||
# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64")
|
|
||||||
# copy the matching binary into this docker image
|
# Copy the Conduit binary into the image at the latest possible moment to maximise caching:
|
||||||
ARG TARGETPLATFORM
|
COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit
|
||||||
COPY --chown=conduit:conduit ./$TARGETPLATFORM /srv/conduit/conduit
|
|
||||||
|
|
22
docker/docker-compose.override.traefik.yml
Normal file
22
docker/docker-compose.override.traefik.yml
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
# Conduit - Traefik Reverse Proxy Labels
|
||||||
|
version: '3'
|
||||||
|
|
||||||
|
services:
|
||||||
|
homeserver:
|
||||||
|
labels:
|
||||||
|
- "traefik.enable=true"
|
||||||
|
- "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
|
||||||
|
|
||||||
|
- "traefik.http.routers.to-conduit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Conduit is hosted
|
||||||
|
- "traefik.http.routers.to-conduit.tls=true"
|
||||||
|
- "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt"
|
||||||
|
|
||||||
|
### Uncomment this if you uncommented Element-Web App in the docker-compose.yml
|
||||||
|
# element-web:
|
||||||
|
# labels:
|
||||||
|
# - "traefik.enable=true"
|
||||||
|
# - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
|
||||||
|
|
||||||
|
# - "traefik.http.routers.to-element-web.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Element-Web is hosted
|
||||||
|
# - "traefik.http.routers.to-element-web.tls=true"
|
||||||
|
# - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt"
|
|
@ -7,8 +7,8 @@ services:
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: matrixconduit/matrix-conduit:latest
|
image: matrixconduit/matrix-conduit:latest
|
||||||
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
|
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
|
||||||
### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
|
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
|
||||||
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
|
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
|
||||||
# build:
|
# build:
|
||||||
# context: .
|
# context: .
|
||||||
# args:
|
# args:
|
||||||
|
@ -27,28 +27,19 @@ services:
|
||||||
environment:
|
environment:
|
||||||
CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name
|
CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name
|
||||||
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
CONDUIT_ALLOW_REGISTRATION : 'true'
|
|
||||||
### Uncomment and change values as desired
|
### Uncomment and change values as desired
|
||||||
# CONDUIT_ADDRESS: 0.0.0.0
|
# CONDUIT_ADDRESS: 0.0.0.0
|
||||||
# CONDUIT_PORT: 6167
|
# CONDUIT_PORT: 6167
|
||||||
# CONDUIT_REGISTRATION_TOKEN: '' # require password for registration
|
|
||||||
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
||||||
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
||||||
# CONDUIT_ALLOW_ENCRYPTION: 'true'
|
# CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off"
|
||||||
# CONDUIT_ALLOW_FEDERATION: 'true'
|
# CONDUIT_ALLOW_JAEGER: 'false'
|
||||||
# CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
# CONDUIT_ALLOW_REGISTRATION : 'false'
|
||||||
|
# CONDUIT_ALLOW_ENCRYPTION: 'false'
|
||||||
|
# CONDUIT_ALLOW_FEDERATION: 'false'
|
||||||
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
||||||
# CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
|
# CONDUIT_WORKERS: 10
|
||||||
|
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
||||||
# We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
|
|
||||||
# to serve those two as static files. If you want to use a different way, delete or comment the below service, here
|
|
||||||
# and in the docker compose override file.
|
|
||||||
well-known:
|
|
||||||
image: nginx:latest
|
|
||||||
restart: unless-stopped
|
|
||||||
volumes:
|
|
||||||
- ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files
|
|
||||||
- ./nginx/www:/var/www/ # location of the client and server .well-known-files
|
|
||||||
|
|
||||||
### Uncomment if you want to use your own Element-Web App.
|
### Uncomment if you want to use your own Element-Web App.
|
||||||
### Note: You need to provide a config.json for Element and you also need a second
|
### Note: You need to provide a config.json for Element and you also need a second
|
||||||
|
@ -64,33 +55,11 @@ services:
|
||||||
# depends_on:
|
# depends_on:
|
||||||
# - homeserver
|
# - homeserver
|
||||||
|
|
||||||
traefik:
|
|
||||||
image: "traefik:latest"
|
|
||||||
container_name: "traefik"
|
|
||||||
restart: "unless-stopped"
|
|
||||||
ports:
|
|
||||||
- "80:80"
|
|
||||||
- "443:443"
|
|
||||||
volumes:
|
|
||||||
- "/var/run/docker.sock:/var/run/docker.sock"
|
|
||||||
# - "./traefik_config:/etc/traefik"
|
|
||||||
- "acme:/etc/traefik/acme"
|
|
||||||
labels:
|
|
||||||
- "traefik.enable=true"
|
|
||||||
|
|
||||||
# middleware redirect
|
|
||||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
|
||||||
# global redirect to https
|
|
||||||
- "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)"
|
|
||||||
- "traefik.http.routers.redirs.entrypoints=http"
|
|
||||||
- "traefik.http.routers.redirs.middlewares=redirect-to-https"
|
|
||||||
|
|
||||||
networks:
|
|
||||||
- proxy
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
db:
|
db:
|
||||||
acme:
|
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
|
# This is the network Traefik listens to, if your network has a different
|
||||||
|
# name, don't forget to change it here and in the docker-compose.override.yml
|
||||||
proxy:
|
proxy:
|
||||||
|
external: true
|
|
@ -1,19 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# If the config file does not contain a default port and the CONDUIT_PORT env is not set, create
|
|
||||||
# try to get port from process list
|
|
||||||
if [ -z "${CONDUIT_PORT}" ]; then
|
|
||||||
CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*')
|
|
||||||
fi
|
|
||||||
|
|
||||||
# If CONDUIT_ADDRESS is not set try to get the address from the process list
|
|
||||||
if [ -z "${CONDUIT_ADDRESS}" ]; then
|
|
||||||
CONDUIT_ADDRESS=$(ss -tlpn | awk -F ' +|:' '/conduit/ { print $4 }')
|
|
||||||
fi
|
|
||||||
|
|
||||||
# The actual health check.
|
|
||||||
# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
|
|
||||||
# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
|
|
||||||
wget --no-verbose --tries=1 --spider "http://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
|
|
||||||
wget --no-verbose --tries=1 --spider "https://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
|
|
||||||
exit 1
|
|
|
@ -1,14 +0,0 @@
|
||||||
# Summary
|
|
||||||
|
|
||||||
- [Introduction](introduction.md)
|
|
||||||
|
|
||||||
- [Configuration](configuration.md)
|
|
||||||
- [Delegation](delegation.md)
|
|
||||||
- [Deploying](deploying.md)
|
|
||||||
- [Generic](deploying/generic.md)
|
|
||||||
- [Debian](deploying/debian.md)
|
|
||||||
- [Docker](deploying/docker.md)
|
|
||||||
- [NixOS](deploying/nixos.md)
|
|
||||||
- [TURN](turn.md)
|
|
||||||
- [Appservices](appservices.md)
|
|
||||||
- [FAQ](faq.md)
|
|
|
@ -1,114 +0,0 @@
|
||||||
# Configuration
|
|
||||||
|
|
||||||
**Conduit** is configured using a TOML file. The configuration file is loaded from the path specified by the `CONDUIT_CONFIG` environment variable.
|
|
||||||
|
|
||||||
> **Note:** The configuration file is required to run Conduit. If the `CONDUIT_CONFIG` environment variable is not set, Conduit will exit with an error.
|
|
||||||
|
|
||||||
> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect
|
|
||||||
|
|
||||||
> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. Example: `CONDUIT_SERVER_NAME="example.org"`
|
|
||||||
|
|
||||||
Conduit's configuration file is divided into the following sections:
|
|
||||||
|
|
||||||
- [Global](#global)
|
|
||||||
- [TLS](#tls)
|
|
||||||
- [Proxy](#proxy)
|
|
||||||
|
|
||||||
|
|
||||||
## Global
|
|
||||||
|
|
||||||
The `global` section contains the following fields:
|
|
||||||
|
|
||||||
> **Note:** The `*` symbol indicates that the field is required, and the values in **parentheses** are the possible values
|
|
||||||
|
|
||||||
| Field | Type | Description | Default |
|
|
||||||
| --- | --- | --- | --- |
|
|
||||||
| `address` | `string` | The address to bind to | `"127.0.0.1"` |
|
|
||||||
| `port` | `integer` | The port to bind to | `8000` |
|
|
||||||
| `tls` | `table` | See the [TLS configuration](#tls) | N/A |
|
|
||||||
| `server_name`_*_ | `string` | The server name | N/A |
|
|
||||||
| `database_backend`_*_ | `string` | The database backend to use (`"rocksdb"` *recommended*, `"sqlite"`) | N/A |
|
|
||||||
| `database_path`_*_ | `string` | The path to the database file/dir | N/A |
|
|
||||||
| `db_cache_capacity_mb` | `float` | The cache capacity, in MB | `300.0` |
|
|
||||||
| `enable_lightning_bolt` | `boolean` | Add `⚡️` emoji to end of user's display name | `true` |
|
|
||||||
| `allow_check_for_updates` | `boolean` | Allow Conduit to check for updates | `true` |
|
|
||||||
| `conduit_cache_capacity_modifier` | `float` | The value to multiply the default cache capacity by | `1.0` |
|
|
||||||
| `rocksdb_max_open_files` | `integer` | The maximum number of open files | `1000` |
|
|
||||||
| `pdu_cache_capacity` | `integer` | The maximum number of Persisted Data Units (PDUs) to cache | `150000` |
|
|
||||||
| `cleanup_second_interval` | `integer` | How often conduit should clean up the database, in seconds | `60` |
|
|
||||||
| `max_request_size` | `integer` | The maximum request size, in bytes | `20971520` (20 MiB) |
|
|
||||||
| `max_concurrent_requests` | `integer` | The maximum number of concurrent requests | `100` |
|
|
||||||
| `max_fetch_prev_events` | `integer` | The maximum number of previous events to fetch per request if conduit notices events are missing | `100` |
|
|
||||||
| `allow_registration` | `boolean` | Opens your homeserver to public registration | `false` |
|
|
||||||
| `registration_token` | `string` | The token users need to have when registering to your homeserver | N/A |
|
|
||||||
| `allow_encryption` | `boolean` | Allow users to enable encryption in their rooms | `true` |
|
|
||||||
| `allow_federation` | `boolean` | Allow federation with other servers | `true` |
|
|
||||||
| `allow_room_creation` | `boolean` | Allow users to create rooms | `true` |
|
|
||||||
| `allow_unstable_room_versions` | `boolean` | Allow users to create and join rooms with unstable versions | `true` |
|
|
||||||
| `default_room_version` | `string` | The default room version (`"6"`-`"10"`)| `"10"` |
|
|
||||||
| `allow_jaeger` | `boolean` | Allow Jaeger tracing | `false` |
|
|
||||||
| `tracing_flame` | `boolean` | Enable flame tracing | `false` |
|
|
||||||
| `proxy` | `table` | See the [Proxy configuration](#proxy) | N/A |
|
|
||||||
| `jwt_secret` | `string` | The secret used to verify JWTs for JWT login; if it is not set, JWT login requests fail with a 400 error | N/A |
|
|
||||||
| `trusted_servers` | `array` | The list of trusted servers to gather public keys of offline servers | `["matrix.org"]` |
|
|
||||||
| `log` | `string` | The log verbosity to use | `"warn"` |
|
|
||||||
| `turn_username` | `string` | The TURN username | `""` |
|
|
||||||
| `turn_password` | `string` | The TURN password | `""` |
|
|
||||||
| `turn_uris` | `array` | The TURN URIs | `[]` |
|
|
||||||
| `turn_secret` | `string` | The TURN secret | `""` |
|
|
||||||
| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` |
|
|
||||||
| `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A |
|
|
||||||
| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
|
|
||||||
| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
|
|
||||||
|
|
||||||
|
|
||||||
### TLS
|
|
||||||
The `tls` table contains the following fields:
|
|
||||||
- `certs`: The path to the public PEM certificate
|
|
||||||
- `key`: The path to the PEM private key
|
|
||||||
|
|
||||||
#### Example
|
|
||||||
```toml
|
|
||||||
[global.tls]
|
|
||||||
certs = "/path/to/cert.pem"
|
|
||||||
key = "/path/to/key.pem"
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Proxy
|
|
||||||
You can choose what requests conduit should proxy (if any). The `proxy` table contains the following fields
|
|
||||||
|
|
||||||
#### Global
|
|
||||||
The global option will proxy all outgoing requests. The `global` table contains the following fields:
|
|
||||||
- `url`: The URL of the proxy server
|
|
||||||
##### Example
|
|
||||||
```toml
|
|
||||||
[global.proxy.global]
|
|
||||||
url = "https://example.com"
|
|
||||||
```
|
|
||||||
|
|
||||||
#### By domain
|
|
||||||
An array of tables that contain the following fields:
|
|
||||||
- `url`: The URL of the proxy server
|
|
||||||
- `include`: Domains that should be proxied (assumed to be `["*"]` if unset)
|
|
||||||
- `exclude`: Domains that should not be proxied (takes precedent over `include`)
|
|
||||||
|
|
||||||
Both `include` and `exclude` allow for glob pattern matching.
|
|
||||||
##### Example
|
|
||||||
In this example, all requests to domains ending in `.onion` and to `matrix.secretly-an-onion-domain.xyz`
will be proxied via `socks5://localhost:9050`, except for domains matching `*.clearnet.onion`. You can add as many `by_domain` tables as you need.
|
|
||||||
```toml
|
|
||||||
[[global.proxy.by_domain]]
|
|
||||||
url = "socks5://localhost:9050"
|
|
||||||
include = ["*.onion", "matrix.secretly-an-onion-domain.xyz"]
|
|
||||||
exclude = ["*.clearnet.onion"]
|
|
||||||
```
|
|
||||||
|
|
||||||
### Example
|
|
||||||
|
|
||||||
> **Note:** The following example is a minimal configuration file. You should replace the values with your own.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[global]
|
|
||||||
{{#include ../conduit-example.toml:22:}}
|
|
||||||
```
|
|
|
@ -1,69 +0,0 @@
|
||||||
# Delegation
|
|
||||||
|
|
||||||
You can run Conduit on a separate domain than the actual server name (what shows up in user ids, aliases, etc.).
|
|
||||||
For example you can have your users have IDs such as `@foo:example.org` and have aliases like `#bar:example.org`,
|
|
||||||
while actually having Conduit hosted on the `matrix.example.org` domain. This is called delegation.
|
|
||||||
|
|
||||||
## Automatic (recommended)
|
|
||||||
|
|
||||||
Conduit has support for hosting delegation files by itself, and by default uses it to serve federation traffic on port 443.
|
|
||||||
|
|
||||||
With this method, you need to direct requests to `/.well-known/matrix/*` to Conduit in your reverse proxy.
|
|
||||||
|
|
||||||
This is only recommended if Conduit is on the same physical server as the server which serves your server name (e.g. example.org)
|
|
||||||
as servers don't always seem to cache the response, leading to slower response times otherwise, but it should also work if you
|
|
||||||
are connected to the server running Conduit using something like a VPN.
|
|
||||||
|
|
||||||
> **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration
|
|
||||||
|
|
||||||
To configure it, use the following options:
|
|
||||||
| Field | Type | Description | Default |
|
|
||||||
| --- | --- | --- | --- |
|
|
||||||
| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
|
|
||||||
| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |
|
|
||||||
|
|
||||||
### Example
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[global]
|
|
||||||
well_known_client = "https://matrix.example.org"
|
|
||||||
well_known_server = "matrix.example.org:443"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Manual
|
|
||||||
|
|
||||||
Alternatively you can serve static JSON files to inform clients and servers how to connect to Conduit.
|
|
||||||
|
|
||||||
### Servers
|
|
||||||
|
|
||||||
For servers to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/server`:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"m.server": "matrix.example.org:443"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
Where `matrix.example.org` is the domain and `443` is the port Conduit is accessible at.
|
|
||||||
|
|
||||||
### Clients
|
|
||||||
|
|
||||||
For clients to discover how to access your domain, serve a response in the following format for `/.well-known/matrix/client`:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"m.homeserver": {
|
|
||||||
"base_url": "https://matrix.example.org"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
Where `matrix.example.org` is the URL Conduit is accessible at.
|
|
||||||
|
|
||||||
To ensure that all clients can access this endpoint, it is recommended you set the following headers for this endpoint:
|
|
||||||
```
|
|
||||||
Access-Control-Allow-Origin: *
|
|
||||||
Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS
|
|
||||||
Access-Control-Allow-Headers: X-Requested-With, Content-Type, Authorization
|
|
||||||
```
|
|
||||||
|
|
||||||
If you also want to be able to use [sliding sync][0], look [here](faq.md#how-do-i-setup-sliding-sync).
|
|
||||||
|
|
||||||
[0]: https://matrix.org/blog/2023/09/matrix-2-0/#sliding-sync
|
|
|
@ -1,3 +0,0 @@
|
||||||
# Deploying
|
|
||||||
|
|
||||||
This chapter describes various ways to deploy Conduit.
|
|
|
@ -1 +0,0 @@
|
||||||
{{#include ../../debian/README.md}}
|
|
|
@ -1,69 +0,0 @@
|
||||||
# Conduit - Behind Traefik Reverse Proxy
version: '3'

services:
    homeserver:
        ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image,
        ### then you are ready to go.
        image: matrixconduit/matrix-conduit:latest
        ### If you want to build a fresh image from the sources, then comment the image line and uncomment the
        ### build lines. If you want meaningful labels in your built Conduit image, you should run docker compose like this:
        ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker compose up -d
        # build:
        #     context: .
        #     args:
        #         CREATED: '2021-03-16T08:18:27Z'
        #         VERSION: '0.1.0'
        #         LOCAL: 'false'
        #         GIT_REF: origin/master
        restart: unless-stopped
        volumes:
            - db:/var/lib/matrix-conduit/
        networks:
            - proxy
        environment:
            CONDUIT_SERVER_NAME: your.server.name # EDIT THIS
            CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
            CONDUIT_DATABASE_BACKEND: rocksdb
            CONDUIT_PORT: 6167
            CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
            CONDUIT_ALLOW_REGISTRATION: 'true'
            #CONDUIT_REGISTRATION_TOKEN: '' # require this token when registering (it is a shared token, not a per-user password)
            CONDUIT_ALLOW_FEDERATION: 'true'
            CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
            CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
            #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
            CONDUIT_ADDRESS: 0.0.0.0
            CONDUIT_CONFIG: '' # An empty value disables the config file; Conduit is configured entirely via the env vars above

    # We need some way to serve the client and server .well-known json. The simplest way is to use a nginx container
    # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
    # and in the docker compose override file.
    well-known:
        image: nginx:latest
        restart: unless-stopped
        volumes:
            - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files
            - ./nginx/www:/var/www/ # location of the client and server .well-known-files

    ### Uncomment if you want to use your own Element-Web App.
    ### Note: You need to provide a config.json for Element and you also need a second
    ### Domain or Subdomain for the communication between Element and Conduit
    ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
    # element-web:
    #     image: vectorim/element-web:latest
    #     restart: unless-stopped
    #     volumes:
    #         - ./element_config.json:/app/config.json
    #     networks:
    #         - proxy
    #     depends_on:
    #         - homeserver

volumes:
    db:

networks:
    # This is the network Traefik listens to, if your network has a different
    # name, don't forget to change it here and in the docker-compose.override.yml
    proxy:
        external: true
|
|
|
@ -1,45 +0,0 @@
|
||||||
# Conduit - Traefik Reverse Proxy Labels
version: '3'

services:
    homeserver:
        labels:
            - "traefik.enable=true"
            - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network

            - "traefik.http.routers.to-conduit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Conduit is hosted
            - "traefik.http.routers.to-conduit.tls=true"
            - "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt"
            - "traefik.http.routers.to-conduit.middlewares=cors-headers@docker"

            - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
            - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
            - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"

    # We need some way to serve the client and server .well-known json. The simplest way is to use a nginx container
    # to serve those two as static files. If you want to use a different way, delete or comment the below service, here
    # and in the docker compose file.
    well-known:
        labels:
            - "traefik.enable=true"
            - "traefik.docker.network=proxy"

            - "traefik.http.routers.to-matrix-wellknown.rule=Host(`<SUBDOMAIN>.<DOMAIN>`) && PathPrefix(`/.well-known/matrix`)"
            - "traefik.http.routers.to-matrix-wellknown.tls=true"
            - "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt"
            - "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker"

            - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
            - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
            - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"


    ### Uncomment this if you uncommented Element-Web App in the docker-compose.yml
    # element-web:
    #     labels:
    #         - "traefik.enable=true"
    #         - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network

    #         - "traefik.http.routers.to-element-web.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Element-Web is hosted
    #         - "traefik.http.routers.to-element-web.tls=true"
    #         - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt"
|
|
|
@ -1,217 +0,0 @@
|
||||||
# Conduit for Docker
|
|
||||||
|
|
||||||
> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.
|
|
||||||
|
|
||||||
## Docker
|
|
||||||
|
|
||||||
To run Conduit with Docker you can either build the image yourself or pull it from a registry.
|
|
||||||
|
|
||||||
|
|
||||||
### Use a registry
|
|
||||||
|
|
||||||
OCI images for Conduit are available in the registries listed below. We recommend using the image tagged as `latest` from GitLab's own registry.
|
|
||||||
|
|
||||||
| Registry | Image | Size | Notes |
|
|
||||||
| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- |
|
|
||||||
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield-latest] | Stable image. |
|
|
||||||
| Docker Hub | [docker.io/matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield-latest] | Stable image. |
|
|
||||||
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:next][gl] | ![Image Size][shield-next] | Development version. |
|
|
||||||
| Docker Hub | [docker.io/matrixconduit/matrix-conduit:next][dh] | ![Image Size][shield-next] | Development version. |
|
|
||||||
|
|
||||||
|
|
||||||
[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
|
|
||||||
[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937
|
|
||||||
[shield-latest]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
|
|
||||||
[shield-next]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/next
|
|
||||||
|
|
||||||
|
|
||||||
Use
|
|
||||||
```bash
|
|
||||||
docker image pull <link>
|
|
||||||
```
|
|
||||||
to pull it to your machine.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### Build using a dockerfile
|
|
||||||
|
|
||||||
The Dockerfile provided by Conduit has two stages, each of which creates an image.
|
|
||||||
|
|
||||||
1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository.
|
|
||||||
2. **Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions.
|
|
||||||
|
|
||||||
To build the image you can use the following command
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker build --tag matrixconduit/matrix-conduit:latest .
|
|
||||||
```
|
|
||||||
|
|
||||||
which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### Run
|
|
||||||
|
|
||||||
When you have the image you can simply run it with
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker run -d -p 8448:6167 \
|
|
||||||
-v db:/var/lib/matrix-conduit/ \
|
|
||||||
-e CONDUIT_SERVER_NAME="your.server.name" \
|
|
||||||
-e CONDUIT_DATABASE_BACKEND="rocksdb" \
|
|
||||||
-e CONDUIT_ALLOW_REGISTRATION=true \
|
|
||||||
-e CONDUIT_ALLOW_FEDERATION=true \
|
|
||||||
-e CONDUIT_MAX_REQUEST_SIZE="20000000" \
|
|
||||||
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
|
|
||||||
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
|
|
||||||
-e CONDUIT_PORT="6167" \
|
|
||||||
--name conduit <link>
|
|
||||||
```
|
|
||||||
|
|
||||||
or you can use [docker compose](#docker-compose).
|
|
||||||
|
|
||||||
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../configuration.md).
|
|
||||||
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
|
|
||||||
to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.
|
|
||||||
|
|
||||||
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
|
|
||||||
|
|
||||||
### Docker compose
|
|
||||||
|
|
||||||
If the `docker run` command is not for you or your setup, you can also use one of the provided `docker compose` files.
|
|
||||||
|
|
||||||
Depending on your proxy setup, you can use one of the following files:
|
|
||||||
- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)
|
|
||||||
- If you don't have a `traefik` instance set up (or any other reverse proxy), use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)
|
|
||||||
- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml)
|
|
||||||
|
|
||||||
When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and
|
|
||||||
rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want
|
|
||||||
for your server.
|
|
||||||
Additional info about deploying Conduit can be found [here](generic.md).
|
|
||||||
|
|
||||||
### Build
|
|
||||||
|
|
||||||
To build the Conduit image with docker compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker compose with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker compose up
|
|
||||||
```
|
|
||||||
|
|
||||||
This will also start the container right afterwards, so if you want it to run in detached mode, you also should use the `-d` flag.
|
|
||||||
|
|
||||||
### Run
|
|
||||||
|
|
||||||
If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker compose up -d
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note:** Don't forget to modify and adjust the compose file to your needs.
|
|
||||||
|
|
||||||
### Use Traefik as Proxy
|
|
||||||
|
|
||||||
As a container user, you probably know about Traefik. It is an easy-to-use reverse proxy for making
containerized apps and services available through the web. With the two provided files,
|
|
||||||
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
|
||||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
|
|
||||||
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy
|
|
||||||
and use Conduit, with a little caveat. If you already took a look at the files, then you should have
|
|
||||||
seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and
|
|
||||||
loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to
|
|
||||||
either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and
|
|
||||||
`.well-known/matrix/server`.
|
|
||||||
|
|
||||||
With the service `well-known` we use a single `nginx` container that will serve those two files.
|
|
||||||
|
|
||||||
So...step by step:
|
|
||||||
|
|
||||||
1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
|
||||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename.
|
|
||||||
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
|
|
||||||
3. Create the `conduit.toml` config file, an example can be found [here](../configuration.md), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
|
|
||||||
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
|
|
||||||
5. Create the files needed by the `well-known` service.
|
|
||||||
|
|
||||||
- `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping)
|
|
||||||
|
|
||||||
```nginx
|
|
||||||
server {
|
|
||||||
server_name <SUBDOMAIN>.<DOMAIN>;
|
|
||||||
listen 80 default_server;
|
|
||||||
|
|
||||||
location /.well-known/matrix/server {
|
|
||||||
return 200 '{"m.server": "<SUBDOMAIN>.<DOMAIN>:443"}';
|
|
||||||
types { } default_type "application/json; charset=utf-8";
|
|
||||||
}
|
|
||||||
|
|
||||||
location /.well-known/matrix/client {
|
|
||||||
return 200 '{"m.homeserver": {"base_url": "https://<SUBDOMAIN>.<DOMAIN>"}}';
|
|
||||||
types { } default_type "application/json; charset=utf-8";
|
|
||||||
add_header "Access-Control-Allow-Origin" *;
|
|
||||||
}
|
|
||||||
|
|
||||||
location / {
|
|
||||||
return 404;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
6. Run `docker compose up -d`
|
|
||||||
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Voice communication
|
|
||||||
|
|
||||||
In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place.
|
|
||||||
|
|
||||||
### Configuration
|
|
||||||
|
|
||||||
Create a configuration file called `coturn.conf` containing:
|
|
||||||
|
|
||||||
```conf
|
|
||||||
use-auth-secret
|
|
||||||
static-auth-secret=<a secret key>
|
|
||||||
realm=<your server domain>
|
|
||||||
```
|
|
||||||
A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`.
|
|
||||||
|
|
||||||
These same values need to be set in conduit. You can either modify conduit.toml to include these lines:
|
|
||||||
```
|
|
||||||
turn_uris = ["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]
|
|
||||||
turn_secret = "<secret key from coturn configuration>"
|
|
||||||
```
|
|
||||||
or append the following to the docker environment variables depending on which configuration method you used earlier:
|
|
||||||
```yml
|
|
||||||
CONDUIT_TURN_URIS: '["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]'
|
|
||||||
CONDUIT_TURN_SECRET: "<secret key from coturn configuration>"
|
|
||||||
```
|
|
||||||
Restart Conduit to apply these changes.
|
|
||||||
|
|
||||||
### Run
|
|
||||||
Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using
|
|
||||||
```bash
|
|
||||||
docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
|
|
||||||
```
|
|
||||||
|
|
||||||
or docker compose. For the latter, paste the following section into a file called `docker-compose.yml`
|
|
||||||
and run `docker compose up -d` in the same directory.
|
|
||||||
|
|
||||||
```yml
|
|
||||||
version: '3'
|
|
||||||
services:
|
|
||||||
turn:
|
|
||||||
container_name: coturn-server
|
|
||||||
image: docker.io/coturn/coturn
|
|
||||||
restart: unless-stopped
|
|
||||||
network_mode: "host"
|
|
||||||
volumes:
|
|
||||||
- ./coturn.conf:/etc/coturn/turnserver.conf
|
|
||||||
```
|
|
||||||
|
|
||||||
To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md.
|
|
||||||
For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration).
|
|
||||||
|
|
|
@ -1,289 +0,0 @@
|
||||||
# Generic deployment documentation
|
|
||||||
|
|
||||||
> ## Getting help
|
|
||||||
>
|
|
||||||
> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us
|
|
||||||
> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
|
|
||||||
|
|
||||||
## Installing Conduit
|
|
||||||
|
|
||||||
Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore
|
|
||||||
only offer Linux binaries.
|
|
||||||
|
|
||||||
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. For `arm`, you should use the `aarch64` builds. Now copy the appropriate url:
|
|
||||||
|
|
||||||
**Stable/Main versions:**
|
|
||||||
|
|
||||||
| Target | Type | Download |
|
|
||||||
|-|-|-|
|
|
||||||
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
|
|
||||||
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
|
|
||||||
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) |
|
|
||||||
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) |
|
|
||||||
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) |
|
|
||||||
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-arm64v8.tar.gz?job=artifacts) |
|
|
||||||
|
|
||||||
These builds were created on and linked against the glibc version shipped with Debian bullseye.
|
|
||||||
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.
|
|
||||||
|
|
||||||
**Latest/Next versions:**
|
|
||||||
|
|
||||||
| Target | Type | Download |
|
|
||||||
|-|-|-|
|
|
||||||
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
|
|
||||||
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
|
|
||||||
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
|
|
||||||
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
|
|
||||||
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |
|
|
||||||
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=artifacts) |
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
|
|
||||||
$ sudo chmod +x /usr/local/bin/matrix-conduit
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, you may compile the binary yourself. First, install any dependencies:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Debian
|
|
||||||
$ sudo apt install libclang-dev build-essential
|
|
||||||
|
|
||||||
# RHEL
|
|
||||||
$ sudo dnf install clang
|
|
||||||
```
|
|
||||||
Then, `cd` into the source tree of conduit-next and run:
|
|
||||||
```bash
|
|
||||||
$ cargo build --release
|
|
||||||
```
|
|
||||||
|
|
||||||
## Adding a Conduit user
|
|
||||||
|
|
||||||
While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
|
|
||||||
you to make sure that the file permissions are correctly set up.
|
|
||||||
|
|
||||||
In Debian or RHEL, you can use this command to create a Conduit user:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
sudo adduser --system conduit --group --disabled-login --no-create-home
|
|
||||||
```
|
|
||||||
|
|
||||||
## Forwarding ports in the firewall or the router
|
|
||||||
|
|
||||||
Conduit uses the ports 443 and 8448 both of which need to be open in the firewall.
|
|
||||||
|
|
||||||
If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config.
|
|
||||||
|
|
||||||
## Optional: Avoid port 8448
|
|
||||||
|
|
||||||
If Conduit runs behind Cloudflare reverse proxy, which doesn't support port 8448 on free plans, [delegation](https://matrix-org.github.io/synapse/latest/delegate.html) can be set up to have federation traffic routed to port 443:
|
|
||||||
```apache
|
|
||||||
# .well-known delegation on Apache
|
|
||||||
<Files "/.well-known/matrix/server">
|
|
||||||
ErrorDocument 200 '{"m.server": "your.server.name:443"}'
|
|
||||||
Header always set Content-Type application/json
|
|
||||||
Header always set Access-Control-Allow-Origin *
|
|
||||||
</Files>
|
|
||||||
```
|
|
||||||
[SRV DNS record](https://spec.matrix.org/latest/server-server-api/#resolving-server-names) delegation is also [possible](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-srv-record/).
|
|
||||||
|
|
||||||
## Setting up a systemd service
|
|
||||||
|
|
||||||
Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
|
|
||||||
server reboots. Simply paste the default systemd service you can find below into
|
|
||||||
`/etc/systemd/system/conduit.service`.
|
|
||||||
|
|
||||||
```systemd
|
|
||||||
[Unit]
|
|
||||||
Description=Conduit Matrix Server
|
|
||||||
After=network.target
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
|
|
||||||
User=conduit
|
|
||||||
Group=conduit
|
|
||||||
Restart=always
|
|
||||||
ExecStart=/usr/local/bin/matrix-conduit
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
```
|
|
||||||
|
|
||||||
Finally, run
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ sudo systemctl daemon-reload
|
|
||||||
```
|
|
||||||
|
|
||||||
## Creating the Conduit configuration file
|
|
||||||
|
|
||||||
Now we need to create the Conduit's config file in
|
|
||||||
`/etc/matrix-conduit/conduit.toml`. Paste in the contents of
|
|
||||||
[`conduit-example.toml`](../configuration.md) **and take a moment to read it.
|
|
||||||
You need to change at least the server name.**
|
|
||||||
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.
|
|
||||||
|
|
||||||
## Setting the correct file permissions
|
|
||||||
|
|
||||||
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
|
|
||||||
Debian or RHEL:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
sudo chown -R root:root /etc/matrix-conduit
|
|
||||||
sudo chmod 755 /etc/matrix-conduit
|
|
||||||
```
|
|
||||||
|
|
||||||
If you use the default database path you also need to run this:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
sudo mkdir -p /var/lib/matrix-conduit/
|
|
||||||
sudo chown -R conduit:conduit /var/lib/matrix-conduit/
|
|
||||||
sudo chmod 700 /var/lib/matrix-conduit/
|
|
||||||
```
|
|
||||||
|
|
||||||
## Setting up the Reverse Proxy
|
|
||||||
|
|
||||||
This depends on whether you use Apache, Caddy, Nginx or another web server.
|
|
||||||
|
|
||||||
### Apache
|
|
||||||
|
|
||||||
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
|
|
||||||
|
|
||||||
```apache
|
|
||||||
# Requires mod_proxy and mod_proxy_http
|
|
||||||
#
|
|
||||||
# On Apache instance compiled from source,
|
|
||||||
# paste into httpd-ssl.conf or httpd.conf
|
|
||||||
|
|
||||||
Listen 8448
|
|
||||||
|
|
||||||
<VirtualHost *:443 *:8448>
|
|
||||||
|
|
||||||
ServerName your.server.name # EDIT THIS
|
|
||||||
|
|
||||||
AllowEncodedSlashes NoDecode
|
|
||||||
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ timeout=300 nocanon
|
|
||||||
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
|
|
||||||
|
|
||||||
</VirtualHost>
|
|
||||||
```
|
|
||||||
|
|
||||||
**You need to make some edits again.** When you are done, run
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Debian
|
|
||||||
$ sudo systemctl reload apache2
|
|
||||||
|
|
||||||
# Installed from source
|
|
||||||
$ sudo apachectl -k graceful
|
|
||||||
```
|
|
||||||
|
|
||||||
### Caddy
|
|
||||||
|
|
||||||
Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name).
|
|
||||||
|
|
||||||
```caddy
|
|
||||||
your.server.name, your.server.name:8448 {
|
|
||||||
reverse_proxy /_matrix/* 127.0.0.1:6167
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
That's it! Just start or enable the service and you're set.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ sudo systemctl enable caddy
|
|
||||||
```
|
|
||||||
|
|
||||||
### Nginx
|
|
||||||
|
|
||||||
If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`
|
|
||||||
|
|
||||||
```nginx
|
|
||||||
server {
|
|
||||||
listen 443 ssl http2;
|
|
||||||
listen [::]:443 ssl http2;
|
|
||||||
listen 8448 ssl http2;
|
|
||||||
listen [::]:8448 ssl http2;
|
|
||||||
server_name your.server.name; # EDIT THIS
|
|
||||||
merge_slashes off;
|
|
||||||
|
|
||||||
# Nginx defaults to only allow 1MB uploads
|
|
||||||
# Increase this to allow posting large files such as videos
|
|
||||||
client_max_body_size 20M;
|
|
||||||
|
|
||||||
location /_matrix/ {
|
|
||||||
proxy_pass http://127.0.0.1:6167;
|
|
||||||
proxy_set_header Host $http_host;
|
|
||||||
proxy_buffering off;
|
|
||||||
proxy_read_timeout 5m;
|
|
||||||
}
|
|
||||||
|
|
||||||
ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
|
|
||||||
ssl_certificate_key /etc/letsencrypt/live/your.server.name/privkey.pem; # EDIT THIS
|
|
||||||
ssl_trusted_certificate /etc/letsencrypt/live/your.server.name/chain.pem; # EDIT THIS
|
|
||||||
include /etc/letsencrypt/options-ssl-nginx.conf;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
**You need to make some edits again.** When you are done, run
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ sudo systemctl reload nginx
|
|
||||||
```
|
|
||||||
|
|
||||||
## SSL Certificate
|
|
||||||
|
|
||||||
If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step.
|
|
||||||
|
|
||||||
The easiest way to get an SSL certificate, if you don't have one already, is to [install](https://certbot.eff.org/instructions) `certbot` and run this:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# To use ECC for the private key,
|
|
||||||
# paste into /etc/letsencrypt/cli.ini:
|
|
||||||
# key-type = ecdsa
|
|
||||||
# elliptic-curve = secp384r1
|
|
||||||
|
|
||||||
$ sudo certbot -d your.server.name
|
|
||||||
```
|
|
||||||
[Automated renewal](https://eff-certbot.readthedocs.io/en/stable/using.html#automated-renewals) is usually preconfigured.
|
|
||||||
|
|
||||||
If using Cloudflare, configure instead the edge and origin certificates in dashboard. In case you’re already running a website on the same Apache server, you can just copy-and-paste the SSL configuration from your main virtual host on port 443 into the above-mentioned vhost.
|
|
||||||
|
|
||||||
## You're done!
|
|
||||||
|
|
||||||
Now you can start Conduit with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ sudo systemctl start conduit
|
|
||||||
```
|
|
||||||
|
|
||||||
Set it to start automatically when your system boots with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ sudo systemctl enable conduit
|
|
||||||
```
|
|
||||||
|
|
||||||
## How do I know it works?
|
|
||||||
|
|
||||||
You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your homeserver and try to register. If you are using a registration token, use [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/), as they support this feature.
|
|
||||||
|
|
||||||
You can also use these commands as a quick health check.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ curl https://your.server.name/_matrix/client/versions
|
|
||||||
|
|
||||||
# If using port 8448
|
|
||||||
$ curl https://your.server.name:8448/_matrix/client/versions
|
|
||||||
```
|
|
||||||
|
|
||||||
- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/).
|
|
||||||
If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly.
|
|
||||||
|
|
||||||
# What's next?
|
|
||||||
|
|
||||||
## Audio/Video calls
|
|
||||||
|
|
||||||
For Audio/Video call functionality see the [TURN Guide](../turn.md).
|
|
||||||
|
|
||||||
## Appservices
|
|
||||||
|
|
||||||
If you want to set up an appservice, take a look at the [Appservice Guide](../appservices.md).
|
|
|
@ -1,18 +0,0 @@
|
||||||
# Conduit for NixOS
|
|
||||||
|
|
||||||
Conduit can be acquired by Nix from various places:
|
|
||||||
|
|
||||||
* The `flake.nix` at the root of the repo
|
|
||||||
* The `default.nix` at the root of the repo
|
|
||||||
* From Nixpkgs
|
|
||||||
|
|
||||||
The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so
|
|
||||||
(for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to
|
|
||||||
configure Conduit.
|
|
||||||
|
|
||||||
If you want to run the latest code, you should get Conduit from the `flake.nix`
|
|
||||||
or `default.nix` and set [`services.matrix-conduit.package`][package]
|
|
||||||
appropriately.
|
|
||||||
|
|
||||||
[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
|
|
||||||
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
|
|
41
docs/faq.md
41
docs/faq.md
|
@ -1,41 +0,0 @@
|
||||||
# FAQ
|
|
||||||
|
|
||||||
Here are some of the most frequently asked questions about Conduit, and their answers.
|
|
||||||
|
|
||||||
## Why do I get a `M_INCOMPATIBLE_ROOM_VERSION` error when trying to join some rooms?
|
|
||||||
|
|
||||||
Conduit doesn't support room versions 1 and 2 at all, and doesn't properly support versions 3-5 currently. You can track the progress of adding support [here](https://gitlab.com/famedly/conduit/-/issues/433).
|
|
||||||
|
|
||||||
## How do I backup my server?
|
|
||||||
|
|
||||||
To backup your Conduit server, it's very easy.
|
|
||||||
You can simply stop Conduit, make a copy or file system snapshot of the database directory, then start Conduit again.
|
|
||||||
|
|
||||||
> **Note**: When using a file system snapshot, it is not required that you stop the server, but it is still recommended as it is the safest option and should ensure your database is not left in an inconsistent state.
|
|
||||||
|
|
||||||
## How do I setup sliding sync?
|
|
||||||
|
|
||||||
If you use the [automatic method for delegation](delegation.md#automatic-recommended) or just proxy `.well-known/matrix/client` to Conduit, sliding sync should work with no extra configuration.
|
|
||||||
If you don't, continue below.
|
|
||||||
|
|
||||||
You need to add a `org.matrix.msc3575.proxy` field to your `.well-known/matrix/client` response which contains a url which Conduit is accessible behind.
|
|
||||||
Here is an example:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
  "m.homeserver": {
|
|
||||||
    "base_url": "https://matrix.example.org"
|
|
||||||
  },
|
|
||||||
"org.matrix.msc3575.proxy": {
|
|
||||||
"url": "https://matrix.example.org"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Can I migrate from Synapse to Conduit?
|
|
||||||
|
|
||||||
Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically.
|
|
||||||
Rooms that were federated can be re-joined via the other participating servers, however media and the like may be deleted from remote servers after some time, and hence might not be recoverable.
|
|
||||||
|
|
||||||
## How do I make someone an admin?
|
|
||||||
|
|
||||||
Simply invite them to the admin room. Once joined, they can administer the server by interacting with the `@conduit:<server_name>` user.
|
|
|
@ -1,13 +0,0 @@
|
||||||
# Conduit
|
|
||||||
|
|
||||||
{{#include ../README.md:catchphrase}}
|
|
||||||
|
|
||||||
{{#include ../README.md:body}}
|
|
||||||
|
|
||||||
#### How can I deploy my own?
|
|
||||||
|
|
||||||
- [Deployment options](deploying.md)
|
|
||||||
|
|
||||||
If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md).
|
|
||||||
|
|
||||||
{{#include ../README.md:footer}}
|
|
25
docs/turn.md
25
docs/turn.md
|
@ -1,25 +0,0 @@
|
||||||
# Setting up TURN/STUN
|
|
||||||
|
|
||||||
## General instructions
|
|
||||||
|
|
||||||
* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/element-hq/synapse/blob/develop/docs/turn-howto.md).
|
|
||||||
|
|
||||||
## Edit/Add a few settings to your existing conduit.toml
|
|
||||||
|
|
||||||
```
|
|
||||||
# Refer to your Coturn settings.
|
|
||||||
# `your.turn.url` has to match the REALM setting of your Coturn as well as `transport`.
|
|
||||||
turn_uris = ["turn:your.turn.url?transport=udp", "turn:your.turn.url?transport=tcp"]
|
|
||||||
|
|
||||||
# static-auth-secret of your turnserver
|
|
||||||
turn_secret = "ADD SECRET HERE"
|
|
||||||
|
|
||||||
# If you have your TURN server configured to use a username and password
|
|
||||||
# you can provide this information too. In this case comment out `turn_secret` above!
|
|
||||||
#turn_username = ""
|
|
||||||
#turn_password = ""
|
|
||||||
```
|
|
||||||
|
|
||||||
## Apply settings
|
|
||||||
|
|
||||||
Restart Conduit.
|
|
79
engage.toml
79
engage.toml
|
@ -1,79 +0,0 @@
|
||||||
interpreter = ["bash", "-euo", "pipefail", "-c"]
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "versions"
|
|
||||||
name = "engage"
|
|
||||||
script = "engage --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "versions"
|
|
||||||
name = "rustc"
|
|
||||||
script = "rustc --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "versions"
|
|
||||||
name = "cargo"
|
|
||||||
script = "cargo --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "versions"
|
|
||||||
name = "cargo-fmt"
|
|
||||||
script = "cargo fmt --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "versions"
|
|
||||||
name = "rustdoc"
|
|
||||||
script = "rustdoc --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "versions"
|
|
||||||
name = "cargo-clippy"
|
|
||||||
script = "cargo clippy -- --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "versions"
|
|
||||||
name = "lychee"
|
|
||||||
script = "lychee --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "lints"
|
|
||||||
name = "cargo-fmt"
|
|
||||||
script = "cargo fmt --check -- --color=always"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "lints"
|
|
||||||
name = "cargo-doc"
|
|
||||||
script = """
|
|
||||||
RUSTDOCFLAGS="-D warnings" cargo doc \
|
|
||||||
--workspace \
|
|
||||||
--no-deps \
|
|
||||||
--document-private-items \
|
|
||||||
--color always
|
|
||||||
"""
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "lints"
|
|
||||||
name = "cargo-clippy"
|
|
||||||
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "lints"
|
|
||||||
name = "taplo-fmt"
|
|
||||||
script = "taplo fmt --check --colors always"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "lints"
|
|
||||||
name = "lychee"
|
|
||||||
script = "lychee --offline docs"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
group = "tests"
|
|
||||||
name = "cargo"
|
|
||||||
script = """
|
|
||||||
cargo test \
|
|
||||||
--workspace \
|
|
||||||
--all-targets \
|
|
||||||
--color=always \
|
|
||||||
-- \
|
|
||||||
--color=always
|
|
||||||
"""
|
|
263
flake.lock
generated
263
flake.lock
generated
|
@ -1,263 +0,0 @@
|
||||||
{
|
|
||||||
"nodes": {
|
|
||||||
"attic": {
|
|
||||||
"inputs": {
|
|
||||||
"crane": "crane",
|
|
||||||
"flake-compat": "flake-compat",
|
|
||||||
"flake-utils": "flake-utils",
|
|
||||||
"nixpkgs": "nixpkgs",
|
|
||||||
"nixpkgs-stable": "nixpkgs-stable"
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1707922053,
|
|
||||||
"narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=",
|
|
||||||
"owner": "zhaofengli",
|
|
||||||
"repo": "attic",
|
|
||||||
"rev": "6eabc3f02fae3683bffab483e614bebfcd476b21",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "zhaofengli",
|
|
||||||
"ref": "main",
|
|
||||||
"repo": "attic",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"crane": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"attic",
|
|
||||||
"nixpkgs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1702918879,
|
|
||||||
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
|
|
||||||
"owner": "ipetkov",
|
|
||||||
"repo": "crane",
|
|
||||||
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "ipetkov",
|
|
||||||
"repo": "crane",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"crane_2": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"nixpkgs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1713721181,
|
|
||||||
"narHash": "sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU=",
|
|
||||||
"owner": "ipetkov",
|
|
||||||
"repo": "crane",
|
|
||||||
"rev": "55f4939ac59ff8f89c6a4029730a2d49ea09105f",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "ipetkov",
|
|
||||||
"ref": "master",
|
|
||||||
"repo": "crane",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"fenix": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"nixpkgs"
|
|
||||||
],
|
|
||||||
"rust-analyzer-src": "rust-analyzer-src"
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1709619709,
|
|
||||||
"narHash": "sha256-l6EPVJfwfelWST7qWQeP6t/TDK3HHv5uUB1b2vw4mOQ=",
|
|
||||||
"owner": "nix-community",
|
|
||||||
"repo": "fenix",
|
|
||||||
"rev": "c8943ea9e98d41325ff57d4ec14736d330b321b2",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "nix-community",
|
|
||||||
"repo": "fenix",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-compat": {
|
|
||||||
"flake": false,
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1673956053,
|
|
||||||
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
|
|
||||||
"owner": "edolstra",
|
|
||||||
"repo": "flake-compat",
|
|
||||||
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "edolstra",
|
|
||||||
"repo": "flake-compat",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-compat_2": {
|
|
||||||
"flake": false,
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1696426674,
|
|
||||||
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
|
|
||||||
"owner": "edolstra",
|
|
||||||
"repo": "flake-compat",
|
|
||||||
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "edolstra",
|
|
||||||
"repo": "flake-compat",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-utils": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1667395993,
|
|
||||||
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-utils_2": {
|
|
||||||
"inputs": {
|
|
||||||
"systems": "systems"
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1709126324,
|
|
||||||
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nix-filter": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1705332318,
|
|
||||||
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "nix-filter",
|
|
||||||
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "nix-filter",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nixpkgs": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1702539185,
|
|
||||||
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
|
|
||||||
"owner": "NixOS",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "NixOS",
|
|
||||||
"ref": "nixpkgs-unstable",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nixpkgs-stable": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1702780907,
|
|
||||||
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
|
|
||||||
"owner": "NixOS",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "NixOS",
|
|
||||||
"ref": "nixos-23.11",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nixpkgs_2": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1709479366,
|
|
||||||
"narHash": "sha256-n6F0n8UV6lnTZbYPl1A9q1BS0p4hduAv1mGAP17CVd0=",
|
|
||||||
"owner": "NixOS",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"rev": "b8697e57f10292a6165a20f03d2f42920dfaf973",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "NixOS",
|
|
||||||
"ref": "nixos-unstable",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"root": {
|
|
||||||
"inputs": {
|
|
||||||
"attic": "attic",
|
|
||||||
"crane": "crane_2",
|
|
||||||
"fenix": "fenix",
|
|
||||||
"flake-compat": "flake-compat_2",
|
|
||||||
"flake-utils": "flake-utils_2",
|
|
||||||
"nix-filter": "nix-filter",
|
|
||||||
"nixpkgs": "nixpkgs_2"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"rust-analyzer-src": {
|
|
||||||
"flake": false,
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1709571018,
|
|
||||||
"narHash": "sha256-ISFrxHxE0J5g7lDAscbK88hwaT5uewvWoma9TlFmRzM=",
|
|
||||||
"owner": "rust-lang",
|
|
||||||
"repo": "rust-analyzer",
|
|
||||||
"rev": "9f14343f9ee24f53f17492c5f9b653427e2ad15e",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "rust-lang",
|
|
||||||
"ref": "nightly",
|
|
||||||
"repo": "rust-analyzer",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"systems": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1681028828,
|
|
||||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"root": "root",
|
|
||||||
"version": 7
|
|
||||||
}
|
|
115
flake.nix
115
flake.nix
|
@ -1,115 +0,0 @@
|
||||||
{
|
|
||||||
inputs = {
|
|
||||||
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
|
|
||||||
flake-utils.url = "github:numtide/flake-utils";
|
|
||||||
nix-filter.url = "github:numtide/nix-filter";
|
|
||||||
flake-compat = {
|
|
||||||
url = "github:edolstra/flake-compat";
|
|
||||||
flake = false;
|
|
||||||
};
|
|
||||||
|
|
||||||
fenix = {
|
|
||||||
url = "github:nix-community/fenix";
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
crane = {
|
|
||||||
url = "github:ipetkov/crane?ref=master";
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
attic.url = "github:zhaofengli/attic?ref=main";
|
|
||||||
};
|
|
||||||
|
|
||||||
outputs = inputs:
|
|
||||||
let
|
|
||||||
# Keep sorted
|
|
||||||
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
|
|
||||||
craneLib =
|
|
||||||
(inputs.crane.mkLib pkgs).overrideToolchain self.toolchain;
|
|
||||||
|
|
||||||
default = self.callPackage ./nix/pkgs/default {};
|
|
||||||
|
|
||||||
inherit inputs;
|
|
||||||
|
|
||||||
oci-image = self.callPackage ./nix/pkgs/oci-image {};
|
|
||||||
|
|
||||||
book = self.callPackage ./nix/pkgs/book {};
|
|
||||||
|
|
||||||
rocksdb =
|
|
||||||
let
|
|
||||||
version = "9.1.1";
|
|
||||||
in
|
|
||||||
pkgs.rocksdb.overrideAttrs (old: {
|
|
||||||
inherit version;
|
|
||||||
src = pkgs.fetchFromGitHub {
|
|
||||||
owner = "facebook";
|
|
||||||
repo = "rocksdb";
|
|
||||||
rev = "v${version}";
|
|
||||||
hash = "sha256-/Xf0bzNJPclH9IP80QNaABfhj4IAR5LycYET18VFCXc=";
|
|
||||||
};
|
|
||||||
});
|
|
||||||
|
|
||||||
shell = self.callPackage ./nix/shell.nix {};
|
|
||||||
|
|
||||||
# The Rust toolchain to use
|
|
||||||
toolchain = inputs
|
|
||||||
.fenix
|
|
||||||
.packages
|
|
||||||
.${pkgs.pkgsBuildHost.system}
|
|
||||||
.fromToolchainFile {
|
|
||||||
file = ./rust-toolchain.toml;
|
|
||||||
|
|
||||||
# See also `rust-toolchain.toml`
|
|
||||||
sha256 = "sha256-Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU=";
|
|
||||||
};
|
|
||||||
});
|
|
||||||
in
|
|
||||||
inputs.flake-utils.lib.eachDefaultSystem (system:
|
|
||||||
let
|
|
||||||
pkgs = inputs.nixpkgs.legacyPackages.${system};
|
|
||||||
in
|
|
||||||
{
|
|
||||||
packages = {
|
|
||||||
default = (mkScope pkgs).default;
|
|
||||||
oci-image = (mkScope pkgs).oci-image;
|
|
||||||
book = (mkScope pkgs).book;
|
|
||||||
}
|
|
||||||
//
|
|
||||||
builtins.listToAttrs
|
|
||||||
(builtins.concatLists
|
|
||||||
(builtins.map
|
|
||||||
(crossSystem:
|
|
||||||
let
|
|
||||||
binaryName = "static-${crossSystem}";
|
|
||||||
pkgsCrossStatic =
|
|
||||||
(import inputs.nixpkgs {
|
|
||||||
inherit system;
|
|
||||||
crossSystem = {
|
|
||||||
config = crossSystem;
|
|
||||||
};
|
|
||||||
}).pkgsStatic;
|
|
||||||
in
|
|
||||||
[
|
|
||||||
# An output for a statically-linked binary
|
|
||||||
{
|
|
||||||
name = binaryName;
|
|
||||||
value = (mkScope pkgsCrossStatic).default;
|
|
||||||
}
|
|
||||||
|
|
||||||
# An output for an OCI image based on that binary
|
|
||||||
{
|
|
||||||
name = "oci-image-${crossSystem}";
|
|
||||||
value = (mkScope pkgsCrossStatic).oci-image;
|
|
||||||
}
|
|
||||||
]
|
|
||||||
)
|
|
||||||
[
|
|
||||||
"x86_64-unknown-linux-musl"
|
|
||||||
"aarch64-unknown-linux-musl"
|
|
||||||
]
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
devShells.default = (mkScope pkgs).shell;
|
|
||||||
}
|
|
||||||
);
|
|
||||||
}
|
|
|
@ -1,34 +0,0 @@
|
||||||
# Keep sorted
|
|
||||||
{ default
|
|
||||||
, inputs
|
|
||||||
, mdbook
|
|
||||||
, stdenv
|
|
||||||
}:
|
|
||||||
|
|
||||||
stdenv.mkDerivation {
|
|
||||||
pname = "${default.pname}-book";
|
|
||||||
version = default.version;
|
|
||||||
|
|
||||||
|
|
||||||
src = let filter = inputs.nix-filter.lib; in filter {
|
|
||||||
root = inputs.self;
|
|
||||||
|
|
||||||
# Keep sorted
|
|
||||||
include = [
|
|
||||||
"book.toml"
|
|
||||||
"conduit-example.toml"
|
|
||||||
"debian/README.md"
|
|
||||||
"docs"
|
|
||||||
"README.md"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
nativeBuildInputs = [
|
|
||||||
mdbook
|
|
||||||
];
|
|
||||||
|
|
||||||
buildPhase = ''
|
|
||||||
mdbook build
|
|
||||||
mv public $out
|
|
||||||
'';
|
|
||||||
}
|
|
|
@ -1,100 +0,0 @@
|
||||||
{ lib
|
|
||||||
, pkgsBuildHost
|
|
||||||
, rust
|
|
||||||
, stdenv
|
|
||||||
}:
|
|
||||||
|
|
||||||
lib.optionalAttrs stdenv.hostPlatform.isStatic {
|
|
||||||
ROCKSDB_STATIC = "";
|
|
||||||
}
|
|
||||||
//
|
|
||||||
{
|
|
||||||
CARGO_BUILD_RUSTFLAGS =
|
|
||||||
lib.concatStringsSep
|
|
||||||
" "
|
|
||||||
([]
|
|
||||||
# This disables PIE for static builds, which isn't great in terms of
|
|
||||||
# security. Unfortunately, my hand is forced because nixpkgs'
|
|
||||||
# `libstdc++.a` is built without `-fPIE`, which precludes us from
|
|
||||||
# leaving PIE enabled.
|
|
||||||
++ lib.optionals
|
|
||||||
stdenv.hostPlatform.isStatic
|
|
||||||
[ "-C" "relocation-model=static" ]
|
|
||||||
++ lib.optionals
|
|
||||||
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
|
|
||||||
[ "-l" "c" ]
|
|
||||||
++ lib.optionals
|
|
||||||
# This check has to match the one [here][0]. We only need to set
|
|
||||||
# these flags when using a different linker. Don't ask me why, though,
|
|
||||||
# because I don't know. All I know is it breaks otherwise.
|
|
||||||
#
|
|
||||||
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
|
|
||||||
(
|
|
||||||
# Nixpkgs doesn't check for x86_64 here but we do, because I
|
|
||||||
# observed a failure building statically for x86_64 without
|
|
||||||
# including it here. Linkers are weird.
|
|
||||||
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
|
|
||||||
&& stdenv.hostPlatform.isStatic
|
|
||||||
&& !stdenv.isDarwin
|
|
||||||
&& !stdenv.cc.bintools.isLLVM
|
|
||||||
)
|
|
||||||
[
|
|
||||||
"-l"
|
|
||||||
"stdc++"
|
|
||||||
"-L"
|
|
||||||
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
|
|
||||||
]
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
# What follows is stolen from [here][0]. Its purpose is to properly configure
|
|
||||||
# compilers and linkers for various stages of the build, and even covers the
|
|
||||||
# case of build scripts that need native code compiled and run on the build
|
|
||||||
# platform (I think).
|
|
||||||
#
|
|
||||||
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
|
|
||||||
//
|
|
||||||
(
|
|
||||||
let
|
|
||||||
inherit (rust.lib) envVars;
|
|
||||||
in
|
|
||||||
lib.optionalAttrs
|
|
||||||
(stdenv.targetPlatform.rust.rustcTarget
|
|
||||||
!= stdenv.hostPlatform.rust.rustcTarget)
|
|
||||||
(
|
|
||||||
let
|
|
||||||
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
|
|
||||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
|
|
||||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
|
|
||||||
envVars.linkerForTarget;
|
|
||||||
}
|
|
||||||
)
|
|
||||||
//
|
|
||||||
(
|
|
||||||
let
|
|
||||||
inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
|
|
||||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
|
|
||||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
|
|
||||||
CARGO_BUILD_TARGET = rustcTarget;
|
|
||||||
}
|
|
||||||
)
|
|
||||||
//
|
|
||||||
(
|
|
||||||
let
|
|
||||||
inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
|
|
||||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
|
|
||||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
|
|
||||||
HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
|
|
||||||
HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
|
|
||||||
}
|
|
||||||
)
|
|
||||||
)
|
|
|
@ -1,95 +0,0 @@
|
||||||
# Dependencies (keep sorted)
|
|
||||||
{ craneLib
|
|
||||||
, inputs
|
|
||||||
, lib
|
|
||||||
, pkgsBuildHost
|
|
||||||
, rocksdb
|
|
||||||
, rust
|
|
||||||
, stdenv
|
|
||||||
|
|
||||||
# Options (keep sorted)
|
|
||||||
, default-features ? true
|
|
||||||
, features ? []
|
|
||||||
, profile ? "release"
|
|
||||||
}:
|
|
||||||
|
|
||||||
let
|
|
||||||
buildDepsOnlyEnv =
|
|
||||||
let
|
|
||||||
rocksdb' = rocksdb.override {
|
|
||||||
enableJemalloc = builtins.elem "jemalloc" features;
|
|
||||||
};
|
|
||||||
in
|
|
||||||
{
|
|
||||||
NIX_OUTPATH_USED_AS_RANDOM_SEED = "randomseed"; # https://crane.dev/faq/rebuilds-bindgen.html
|
|
||||||
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
|
|
||||||
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
|
|
||||||
}
|
|
||||||
//
|
|
||||||
(import ./cross-compilation-env.nix {
|
|
||||||
# Keep sorted
|
|
||||||
inherit
|
|
||||||
lib
|
|
||||||
pkgsBuildHost
|
|
||||||
rust
|
|
||||||
stdenv;
|
|
||||||
});
|
|
||||||
|
|
||||||
buildPackageEnv = {
|
|
||||||
CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev;
|
|
||||||
} // buildDepsOnlyEnv;
|
|
||||||
|
|
||||||
commonAttrs = {
|
|
||||||
inherit
|
|
||||||
(craneLib.crateNameFromCargoToml {
|
|
||||||
cargoToml = "${inputs.self}/Cargo.toml";
|
|
||||||
})
|
|
||||||
pname
|
|
||||||
version;
|
|
||||||
|
|
||||||
src = let filter = inputs.nix-filter.lib; in filter {
|
|
||||||
root = inputs.self;
|
|
||||||
|
|
||||||
# Keep sorted
|
|
||||||
include = [
|
|
||||||
"Cargo.lock"
|
|
||||||
"Cargo.toml"
|
|
||||||
"src"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
nativeBuildInputs = [
|
|
||||||
# bindgen needs the build platform's libclang. Apparently due to "splicing
|
|
||||||
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
|
|
||||||
# right thing here.
|
|
||||||
pkgsBuildHost.rustPlatform.bindgenHook
|
|
||||||
];
|
|
||||||
|
|
||||||
CARGO_PROFILE = profile;
|
|
||||||
};
|
|
||||||
in
|
|
||||||
|
|
||||||
craneLib.buildPackage ( commonAttrs // {
|
|
||||||
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
|
|
||||||
env = buildDepsOnlyEnv;
|
|
||||||
});
|
|
||||||
|
|
||||||
cargoExtraArgs = "--locked "
|
|
||||||
+ lib.optionalString
|
|
||||||
(!default-features)
|
|
||||||
"--no-default-features "
|
|
||||||
+ lib.optionalString
|
|
||||||
(features != [])
|
|
||||||
"--features " + (builtins.concatStringsSep "," features);
|
|
||||||
|
|
||||||
# This is redundant with CI
|
|
||||||
doCheck = false;
|
|
||||||
|
|
||||||
env = buildPackageEnv;
|
|
||||||
|
|
||||||
passthru = {
|
|
||||||
env = buildPackageEnv;
|
|
||||||
};
|
|
||||||
|
|
||||||
meta.mainProgram = commonAttrs.pname;
|
|
||||||
})
|
|
|
@ -1,25 +0,0 @@
|
||||||
# Keep sorted
|
|
||||||
{ default
|
|
||||||
, dockerTools
|
|
||||||
, lib
|
|
||||||
, tini
|
|
||||||
}:
|
|
||||||
|
|
||||||
dockerTools.buildImage {
|
|
||||||
name = default.pname;
|
|
||||||
tag = "next";
|
|
||||||
copyToRoot = [
|
|
||||||
dockerTools.caCertificates
|
|
||||||
];
|
|
||||||
config = {
|
|
||||||
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
|
|
||||||
# are handled as expected
|
|
||||||
Entrypoint = [
|
|
||||||
"${lib.getExe' tini "tini"}"
|
|
||||||
"--"
|
|
||||||
];
|
|
||||||
Cmd = [
|
|
||||||
"${lib.getExe default}"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
}
|
|
|
@ -1,61 +0,0 @@
|
||||||
# Keep sorted
|
|
||||||
{ cargo-deb
|
|
||||||
, default
|
|
||||||
, engage
|
|
||||||
, go
|
|
||||||
, inputs
|
|
||||||
, jq
|
|
||||||
, lychee
|
|
||||||
, mdbook
|
|
||||||
, mkShell
|
|
||||||
, olm
|
|
||||||
, system
|
|
||||||
, taplo
|
|
||||||
, toolchain
|
|
||||||
}:
|
|
||||||
|
|
||||||
mkShell {
|
|
||||||
env = default.env // {
|
|
||||||
# Rust Analyzer needs to be able to find the path to default crate
|
|
||||||
# sources, and it can read this environment variable to do so. The
|
|
||||||
# `rust-src` component is required in order for this to work.
|
|
||||||
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
|
|
||||||
};
|
|
||||||
|
|
||||||
# Development tools
|
|
||||||
nativeBuildInputs = [
|
|
||||||
# Always use nightly rustfmt because most of its options are unstable
|
|
||||||
#
|
|
||||||
# This needs to come before `toolchain` in this list, otherwise
|
|
||||||
# `$PATH` will have stable rustfmt instead.
|
|
||||||
inputs.fenix.packages.${system}.latest.rustfmt
|
|
||||||
|
|
||||||
# rust itself
|
|
||||||
toolchain
|
|
||||||
|
|
||||||
# CI tests
|
|
||||||
engage
|
|
||||||
|
|
||||||
# format toml files
|
|
||||||
taplo
|
|
||||||
|
|
||||||
# Needed for producing Debian packages
|
|
||||||
cargo-deb
|
|
||||||
|
|
||||||
# Needed for our script for Complement
|
|
||||||
jq
|
|
||||||
|
|
||||||
# Needed for Complement
|
|
||||||
go
|
|
||||||
olm
|
|
||||||
|
|
||||||
# Needed for our script for Complement
|
|
||||||
jq
|
|
||||||
|
|
||||||
# Needed for finding broken markdown links
|
|
||||||
lychee
|
|
||||||
|
|
||||||
# Useful for editing the book locally
|
|
||||||
mdbook
|
|
||||||
] ++ default.nativeBuildInputs ;
|
|
||||||
}
|
|
1
rust-toolchain
Normal file
1
rust-toolchain
Normal file
|
@ -0,0 +1 @@
|
||||||
|
1.52
|
|
@ -1,21 +0,0 @@
|
||||||
# This is the authoritiative configuration of this project's Rust toolchain.
|
|
||||||
#
|
|
||||||
# Other files that need upkeep when this changes:
|
|
||||||
#
|
|
||||||
# * `Cargo.toml`
|
|
||||||
# * `flake.nix`
|
|
||||||
#
|
|
||||||
# Search in those files for `rust-toolchain.toml` to find the relevant places.
|
|
||||||
# If you're having trouble making the relevant changes, bug a maintainer.
|
|
||||||
|
|
||||||
[toolchain]
|
|
||||||
channel = "1.79.0"
|
|
||||||
components = [
|
|
||||||
# For rust-analyzer
|
|
||||||
"rust-src",
|
|
||||||
]
|
|
||||||
targets = [
|
|
||||||
"aarch64-unknown-linux-musl",
|
|
||||||
"x86_64-unknown-linux-gnu",
|
|
||||||
"x86_64-unknown-linux-musl",
|
|
||||||
]
|
|
|
@ -1,2 +1,2 @@
|
||||||
imports_granularity = "Crate"
|
|
||||||
unstable_features = true
|
unstable_features = true
|
||||||
|
imports_granularity="Crate"
|
||||||
|
|
|
@ -1,502 +0,0 @@
|
||||||
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
|
|
||||||
use crate::{api::client_server, services, utils, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
account::{
|
|
||||||
change_password, deactivate, get_3pids, get_username_availability,
|
|
||||||
register::{self, LoginType},
|
|
||||||
request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
|
|
||||||
whoami, ThirdPartyIdRemovalStatus,
|
|
||||||
},
|
|
||||||
error::ErrorKind,
|
|
||||||
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
|
||||||
},
|
|
||||||
events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType},
|
|
||||||
push, UserId,
|
|
||||||
};
|
|
||||||
use tracing::{info, warn};
|
|
||||||
|
|
||||||
use register::RegistrationKind;
|
|
||||||
|
|
||||||
const RANDOM_USER_ID_LENGTH: usize = 10;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/register/available`
|
|
||||||
///
|
|
||||||
/// Checks if a username is valid and available on this server.
|
|
||||||
///
|
|
||||||
/// Conditions for returning true:
|
|
||||||
/// - The user id is not historical
|
|
||||||
/// - The server name of the user id matches this server
|
|
||||||
/// - No user or appservice on this server already claimed this username
|
|
||||||
///
|
|
||||||
/// Note: This will not reserve the username, so the username might become invalid when trying to register
|
|
||||||
pub async fn get_register_available_route(
|
|
||||||
body: Ruma<get_username_availability::v3::Request>,
|
|
||||||
) -> Result<get_username_availability::v3::Response> {
|
|
||||||
// Validate user id
|
|
||||||
let user_id = UserId::parse_with_server_name(
|
|
||||||
body.username.to_lowercase(),
|
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
.ok()
|
|
||||||
.filter(|user_id| {
|
|
||||||
!user_id.is_historical() && user_id.server_name() == services().globals.server_name()
|
|
||||||
})
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidUsername,
|
|
||||||
"Username is invalid.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
// Check if username is creative enough
|
|
||||||
if services().users.exists(&user_id)? {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::UserInUse,
|
|
||||||
"Desired user ID is already taken.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO add check for appservice namespaces
|
|
||||||
|
|
||||||
// If no if check is true we have an username that's available to be used.
|
|
||||||
Ok(get_username_availability::v3::Response { available: true })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/register`
|
|
||||||
///
|
|
||||||
/// Register an account on this homeserver.
|
|
||||||
///
|
|
||||||
/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html)
|
|
||||||
/// to check if the user id is valid and available.
|
|
||||||
///
|
|
||||||
/// - Only works if registration is enabled
|
|
||||||
/// - If type is guest: ignores all parameters except initial_device_display_name
|
|
||||||
/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage)
|
|
||||||
/// - If type is not guest and no username is given: Always fails after UIAA check
|
|
||||||
/// - Creates a new account and populates it with default account data
|
|
||||||
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
|
|
||||||
pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
|
|
||||||
if !services().globals.allow_registration().await && body.appservice_info.is_none() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"Registration has been disabled.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let is_guest = body.kind == RegistrationKind::Guest;
|
|
||||||
|
|
||||||
let user_id = match (&body.username, is_guest) {
|
|
||||||
(Some(username), false) => {
|
|
||||||
let proposed_user_id = UserId::parse_with_server_name(
|
|
||||||
username.to_lowercase(),
|
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
.ok()
|
|
||||||
.filter(|user_id| {
|
|
||||||
!user_id.is_historical()
|
|
||||||
&& user_id.server_name() == services().globals.server_name()
|
|
||||||
})
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidUsername,
|
|
||||||
"Username is invalid.",
|
|
||||||
))?;
|
|
||||||
if services().users.exists(&proposed_user_id)? {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::UserInUse,
|
|
||||||
"Desired user ID is already taken.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
proposed_user_id
|
|
||||||
}
|
|
||||||
_ => loop {
|
|
||||||
let proposed_user_id = UserId::parse_with_server_name(
|
|
||||||
utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(),
|
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
if !services().users.exists(&proposed_user_id)? {
|
|
||||||
break proposed_user_id;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
if body.body.login_type == Some(LoginType::ApplicationService) {
|
|
||||||
if let Some(ref info) = body.appservice_info {
|
|
||||||
if !info.is_user_match(&user_id) {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"User is not in namespace.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::MissingToken,
|
|
||||||
"Missing appservice token.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
} else if services().appservice.is_exclusive_user_id(&user_id).await {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"User id reserved by appservice.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// UIAA
|
|
||||||
let mut uiaainfo;
|
|
||||||
let skip_auth = if services().globals.config.registration_token.is_some() {
|
|
||||||
// Registration token required
|
|
||||||
uiaainfo = UiaaInfo {
|
|
||||||
flows: vec![AuthFlow {
|
|
||||||
stages: vec![AuthType::RegistrationToken],
|
|
||||||
}],
|
|
||||||
completed: Vec::new(),
|
|
||||||
params: Default::default(),
|
|
||||||
session: None,
|
|
||||||
auth_error: None,
|
|
||||||
};
|
|
||||||
body.appservice_info.is_some()
|
|
||||||
} else {
|
|
||||||
// No registration token necessary, but clients must still go through the flow
|
|
||||||
uiaainfo = UiaaInfo {
|
|
||||||
flows: vec![AuthFlow {
|
|
||||||
stages: vec![AuthType::Dummy],
|
|
||||||
}],
|
|
||||||
completed: Vec::new(),
|
|
||||||
params: Default::default(),
|
|
||||||
session: None,
|
|
||||||
auth_error: None,
|
|
||||||
};
|
|
||||||
body.appservice_info.is_some() || is_guest
|
|
||||||
};
|
|
||||||
|
|
||||||
if !skip_auth {
|
|
||||||
if let Some(auth) = &body.auth {
|
|
||||||
let (worked, uiaainfo) = services().uiaa.try_auth(
|
|
||||||
&UserId::parse_with_server_name("", services().globals.server_name())
|
|
||||||
.expect("we know this is valid"),
|
|
||||||
"".into(),
|
|
||||||
auth,
|
|
||||||
&uiaainfo,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
}
|
|
||||||
// Success!
|
|
||||||
} else if let Some(json) = body.json_body {
|
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
|
||||||
services().uiaa.create(
|
|
||||||
&UserId::parse_with_server_name("", services().globals.server_name())
|
|
||||||
.expect("we know this is valid"),
|
|
||||||
"".into(),
|
|
||||||
&uiaainfo,
|
|
||||||
&json,
|
|
||||||
)?;
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let password = if is_guest {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
body.password.as_deref()
|
|
||||||
};
|
|
||||||
|
|
||||||
// Create user
|
|
||||||
services().users.create(&user_id, password)?;
|
|
||||||
|
|
||||||
// Default to pretty displayname
|
|
||||||
let mut displayname = user_id.localpart().to_owned();
|
|
||||||
|
|
||||||
// If enabled append lightning bolt to display name (default true)
|
|
||||||
if services().globals.enable_lightning_bolt() {
|
|
||||||
displayname.push_str(" ⚡️");
|
|
||||||
}
|
|
||||||
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.set_displayname(&user_id, Some(displayname.clone()))?;
|
|
||||||
|
|
||||||
// Initial account data
|
|
||||||
services().account_data.update(
|
|
||||||
None,
|
|
||||||
&user_id,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
|
|
||||||
content: ruma::events::push_rules::PushRulesEventContent {
|
|
||||||
global: push::Ruleset::server_default(&user_id),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
.expect("to json always works"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Inhibit login does not work for guests
|
|
||||||
if !is_guest && body.inhibit_login {
|
|
||||||
return Ok(register::v3::Response {
|
|
||||||
access_token: None,
|
|
||||||
user_id,
|
|
||||||
device_id: None,
|
|
||||||
refresh_token: None,
|
|
||||||
expires_in: None,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate new device id if the user didn't specify one
|
|
||||||
let device_id = if is_guest {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
body.device_id.clone()
|
|
||||||
}
|
|
||||||
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
|
|
||||||
|
|
||||||
// Generate new token for the device
|
|
||||||
let token = utils::random_string(TOKEN_LENGTH);
|
|
||||||
|
|
||||||
// Create device for this account
|
|
||||||
services().users.create_device(
|
|
||||||
&user_id,
|
|
||||||
&device_id,
|
|
||||||
&token,
|
|
||||||
body.initial_device_display_name.clone(),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
info!("New user {} registered on this server.", user_id);
|
|
||||||
if body.appservice_info.is_none() && !is_guest {
|
|
||||||
services()
|
|
||||||
.admin
|
|
||||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
|
||||||
"New user {user_id} registered on this server."
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
// If this is the first real user, grant them admin privileges
|
|
||||||
// Note: the server user, @conduit:servername, is generated first
|
|
||||||
if !is_guest {
|
|
||||||
if let Some(admin_room) = services().admin.get_admin_room()? {
|
|
||||||
if services()
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.room_joined_count(&admin_room)?
|
|
||||||
== Some(1)
|
|
||||||
{
|
|
||||||
services()
|
|
||||||
.admin
|
|
||||||
.make_user_admin(&user_id, displayname)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
warn!("Granting {} admin privileges as the first user", user_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(register::v3::Response {
|
|
||||||
access_token: Some(token),
|
|
||||||
user_id,
|
|
||||||
device_id: Some(device_id),
|
|
||||||
refresh_token: None,
|
|
||||||
expires_in: None,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/account/password`
|
|
||||||
///
|
|
||||||
/// Changes the password of this account.
|
|
||||||
///
|
|
||||||
/// - Requires UIAA to verify user password
|
|
||||||
/// - Changes the password of the sender user
|
|
||||||
/// - The password hash is calculated using argon2 with 32 character salt, the plain password is
|
|
||||||
/// not saved
|
|
||||||
///
|
|
||||||
/// If logout_devices is true it does the following for each device except the sender device:
|
|
||||||
/// - Invalidates access token
|
|
||||||
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
|
||||||
/// - Forgets to-device events
|
|
||||||
/// - Triggers device list updates
|
|
||||||
pub async fn change_password_route(
|
|
||||||
body: Ruma<change_password::v3::Request>,
|
|
||||||
) -> Result<change_password::v3::Response> {
|
|
||||||
let sender_user = body
|
|
||||||
.sender_user
|
|
||||||
.as_ref()
|
|
||||||
// In the future password changes could be performed with UIA with 3PIDs, but we don't support that currently
|
|
||||||
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
|
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut uiaainfo = UiaaInfo {
|
|
||||||
flows: vec![AuthFlow {
|
|
||||||
stages: vec![AuthType::Password],
|
|
||||||
}],
|
|
||||||
completed: Vec::new(),
|
|
||||||
params: Default::default(),
|
|
||||||
session: None,
|
|
||||||
auth_error: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
|
||||||
let (worked, uiaainfo) =
|
|
||||||
services()
|
|
||||||
.uiaa
|
|
||||||
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
|
||||||
if !worked {
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
}
|
|
||||||
// Success!
|
|
||||||
} else if let Some(json) = body.json_body {
|
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
|
||||||
services()
|
|
||||||
.uiaa
|
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
|
||||||
}
|
|
||||||
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.set_password(sender_user, Some(&body.new_password))?;
|
|
||||||
|
|
||||||
if body.logout_devices {
|
|
||||||
// Logout all devices except the current one
|
|
||||||
for id in services()
|
|
||||||
.users
|
|
||||||
.all_device_ids(sender_user)
|
|
||||||
.filter_map(|id| id.ok())
|
|
||||||
.filter(|id| id != sender_device)
|
|
||||||
{
|
|
||||||
services().users.remove_device(sender_user, &id)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("User {} changed their password.", sender_user);
|
|
||||||
services()
|
|
||||||
.admin
|
|
||||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
|
||||||
"User {sender_user} changed their password."
|
|
||||||
)));
|
|
||||||
|
|
||||||
Ok(change_password::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET _matrix/client/r0/account/whoami`
|
|
||||||
///
|
|
||||||
/// Get user_id of the sender user.
|
|
||||||
///
|
|
||||||
/// Note: Also works for Application Services
|
|
||||||
pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let device_id = body.sender_device.as_ref().cloned();
|
|
||||||
|
|
||||||
Ok(whoami::v3::Response {
|
|
||||||
user_id: sender_user.clone(),
|
|
||||||
device_id,
|
|
||||||
is_guest: services().users.is_deactivated(sender_user)? && body.appservice_info.is_none(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/account/deactivate`
|
|
||||||
///
|
|
||||||
/// Deactivate sender user account.
|
|
||||||
///
|
|
||||||
/// - Leaves all rooms and rejects all invitations
|
|
||||||
/// - Invalidates all access tokens
|
|
||||||
/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
|
|
||||||
/// - Forgets all to-device events
|
|
||||||
/// - Triggers device list updates
|
|
||||||
/// - Removes ability to log in again
|
|
||||||
pub async fn deactivate_route(
|
|
||||||
body: Ruma<deactivate::v3::Request>,
|
|
||||||
) -> Result<deactivate::v3::Response> {
|
|
||||||
let sender_user = body
|
|
||||||
.sender_user
|
|
||||||
.as_ref()
|
|
||||||
// In the future password changes could be performed with UIA with SSO, but we don't support that currently
|
|
||||||
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
|
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut uiaainfo = UiaaInfo {
|
|
||||||
flows: vec![AuthFlow {
|
|
||||||
stages: vec![AuthType::Password],
|
|
||||||
}],
|
|
||||||
completed: Vec::new(),
|
|
||||||
params: Default::default(),
|
|
||||||
session: None,
|
|
||||||
auth_error: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
|
||||||
let (worked, uiaainfo) =
|
|
||||||
services()
|
|
||||||
.uiaa
|
|
||||||
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
|
||||||
if !worked {
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
}
|
|
||||||
// Success!
|
|
||||||
} else if let Some(json) = body.json_body {
|
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
|
||||||
services()
|
|
||||||
.uiaa
|
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make the user leave all rooms before deactivation
|
|
||||||
client_server::leave_all_rooms(sender_user).await?;
|
|
||||||
|
|
||||||
// Remove devices and mark account as deactivated
|
|
||||||
services().users.deactivate_account(sender_user)?;
|
|
||||||
|
|
||||||
info!("User {} deactivated their account.", sender_user);
|
|
||||||
services()
|
|
||||||
.admin
|
|
||||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
|
||||||
"User {sender_user} deactivated their account."
|
|
||||||
)));
|
|
||||||
|
|
||||||
Ok(deactivate::v3::Response {
|
|
||||||
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET _matrix/client/v3/account/3pid`
|
|
||||||
///
|
|
||||||
/// Get a list of third party identifiers associated with this account.
|
|
||||||
///
|
|
||||||
/// - Currently always returns empty list
|
|
||||||
pub async fn third_party_route(
|
|
||||||
body: Ruma<get_3pids::v3::Request>,
|
|
||||||
) -> Result<get_3pids::v3::Response> {
|
|
||||||
let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
Ok(get_3pids::v3::Response::new(Vec::new()))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/v3/account/3pid/email/requestToken`
|
|
||||||
///
|
|
||||||
/// "This API should be used to request validation tokens when adding an email address to an account"
|
|
||||||
///
|
|
||||||
/// - 403 signals that The homeserver does not allow the third party identifier as a contact option.
|
|
||||||
pub async fn request_3pid_management_token_via_email_route(
|
|
||||||
_body: Ruma<request_3pid_management_token_via_email::v3::Request>,
|
|
||||||
) -> Result<request_3pid_management_token_via_email::v3::Response> {
|
|
||||||
Err(Error::BadRequest(
|
|
||||||
ErrorKind::ThreepidDenied,
|
|
||||||
"Third party identifiers are currently unsupported by this server implementation",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken`
|
|
||||||
///
|
|
||||||
/// "This API should be used to request validation tokens when adding an phone number to an account"
|
|
||||||
///
|
|
||||||
/// - 403 signals that The homeserver does not allow the third party identifier as a contact option.
|
|
||||||
pub async fn request_3pid_management_token_via_msisdn_route(
|
|
||||||
_body: Ruma<request_3pid_management_token_via_msisdn::v3::Request>,
|
|
||||||
) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
|
|
||||||
Err(Error::BadRequest(
|
|
||||||
ErrorKind::ThreepidDenied,
|
|
||||||
"Third party identifiers are currently unsupported by this server implementation",
|
|
||||||
))
|
|
||||||
}
|
|
|
@ -1,189 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use rand::seq::SliceRandom;
|
|
||||||
use ruma::{
|
|
||||||
api::{
|
|
||||||
appservice,
|
|
||||||
client::{
|
|
||||||
alias::{create_alias, delete_alias, get_alias},
|
|
||||||
error::ErrorKind,
|
|
||||||
},
|
|
||||||
federation,
|
|
||||||
},
|
|
||||||
OwnedRoomAliasId,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/directory/room/{roomAlias}`
|
|
||||||
///
|
|
||||||
/// Creates a new room alias on this server.
|
|
||||||
pub async fn create_alias_route(
|
|
||||||
body: Ruma<create_alias::v3::Request>,
|
|
||||||
) -> Result<create_alias::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.room_alias.server_name() != services().globals.server_name() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Alias is from another server.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(ref info) = body.appservice_info {
|
|
||||||
if !info.aliases.is_match(body.room_alias.as_str()) {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"Room alias is not in namespace.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
} else if services()
|
|
||||||
.appservice
|
|
||||||
.is_exclusive_alias(&body.room_alias)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"Room alias reserved by appservice.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
if services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_local_alias(&body.room_alias)?
|
|
||||||
.is_some()
|
|
||||||
{
|
|
||||||
return Err(Error::Conflict("Alias already exists."));
|
|
||||||
}
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.set_alias(&body.room_alias, &body.room_id, sender_user)?;
|
|
||||||
|
|
||||||
Ok(create_alias::v3::Response::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}`
|
|
||||||
///
|
|
||||||
/// Deletes a room alias from this server.
|
|
||||||
///
|
|
||||||
/// - TODO: Update canonical alias event
|
|
||||||
pub async fn delete_alias_route(
|
|
||||||
body: Ruma<delete_alias::v3::Request>,
|
|
||||||
) -> Result<delete_alias::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.room_alias.server_name() != services().globals.server_name() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Alias is from another server.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(ref info) = body.appservice_info {
|
|
||||||
if !info.aliases.is_match(body.room_alias.as_str()) {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"Room alias is not in namespace.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
} else if services()
|
|
||||||
.appservice
|
|
||||||
.is_exclusive_alias(&body.room_alias)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"Room alias reserved by appservice.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.remove_alias(&body.room_alias, sender_user)?;
|
|
||||||
|
|
||||||
// TODO: update alt_aliases?
|
|
||||||
|
|
||||||
Ok(delete_alias::v3::Response::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/directory/room/{roomAlias}`
|
|
||||||
///
|
|
||||||
/// Resolve an alias locally or over federation.
|
|
||||||
///
|
|
||||||
/// - TODO: Suggest more servers to join via
|
|
||||||
pub async fn get_alias_route(
|
|
||||||
body: Ruma<get_alias::v3::Request>,
|
|
||||||
) -> Result<get_alias::v3::Response> {
|
|
||||||
get_alias_helper(body.body.room_alias).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Resolves `room_alias` to a room id plus a list of servers to join through.
///
/// Remote aliases are resolved over federation (the returned server list is
/// shuffled); local aliases are looked up in the database and, failing that,
/// queried from any appservice whose alias namespace matches.
pub(crate) async fn get_alias_helper(
    room_alias: OwnedRoomAliasId,
) -> Result<get_alias::v3::Response> {
    if room_alias.server_name() != services().globals.server_name() {
        // Remote alias: ask the owning server directly.
        let response = services()
            .sending
            .send_federation_request(
                room_alias.server_name(),
                federation::query::get_room_information::v1::Request {
                    room_alias: room_alias.to_owned(),
                },
            )
            .await?;

        let mut servers = response.servers;
        // Shuffle so clients don't all try the same candidate server first.
        servers.shuffle(&mut rand::thread_rng());

        return Ok(get_alias::v3::Response::new(response.room_id, servers));
    }

    let mut room_id = None;
    match services().rooms.alias.resolve_local_alias(&room_alias)? {
        Some(r) => room_id = Some(r),
        None => {
            // Not in our database: ask matching appservices whether they know it.
            for appservice in services().appservice.read().await.values() {
                if appservice.aliases.is_match(room_alias.as_str())
                    && matches!(
                        services()
                            .sending
                            .send_appservice_request(
                                appservice.registration.clone(),
                                appservice::query::query_room_alias::v1::Request {
                                    room_alias: room_alias.clone(),
                                },
                            )
                            .await,
                        Ok(Some(_opt_result))
                    )
                {
                    // A positive appservice reply means the alias should now
                    // resolve locally; anything else is a misbehaving appservice.
                    room_id = Some(
                        services()
                            .rooms
                            .alias
                            .resolve_local_alias(&room_alias)?
                            .ok_or_else(|| {
                                Error::bad_config("Appservice lied to us. Room does not exist.")
                            })?,
                    );
                    break;
                }
            }
        }
    };

    let room_id = match room_id {
        Some(room_id) => room_id,
        None => {
            return Err(Error::BadRequest(
                ErrorKind::NotFound,
                "Room with alias not found.",
            ))
        }
    };

    // Local resolution: the only server we can vouch for is ourselves.
    Ok(get_alias::v3::Response::new(
        room_id,
        vec![services().globals.server_name().to_owned()],
    ))
}
|
|
|
@ -1,362 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::api::client::{
|
|
||||||
backup::{
|
|
||||||
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
|
|
||||||
create_backup_version, delete_backup_keys, delete_backup_keys_for_room,
|
|
||||||
delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys,
|
|
||||||
get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info,
|
|
||||||
update_backup_version,
|
|
||||||
},
|
|
||||||
error::ErrorKind,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/room_keys/version`
|
|
||||||
///
|
|
||||||
/// Creates a new backup.
|
|
||||||
pub async fn create_backup_version_route(
|
|
||||||
body: Ruma<create_backup_version::v3::Request>,
|
|
||||||
) -> Result<create_backup_version::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let version = services()
|
|
||||||
.key_backups
|
|
||||||
.create_backup(sender_user, &body.algorithm)?;
|
|
||||||
|
|
||||||
Ok(create_backup_version::v3::Response { version })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
|
|
||||||
///
|
|
||||||
/// Update information about an existing backup. Only `auth_data` can be modified.
|
|
||||||
pub async fn update_backup_version_route(
|
|
||||||
body: Ruma<update_backup_version::v3::Request>,
|
|
||||||
) -> Result<update_backup_version::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
services()
|
|
||||||
.key_backups
|
|
||||||
.update_backup(sender_user, &body.version, &body.algorithm)?;
|
|
||||||
|
|
||||||
Ok(update_backup_version::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/version`
|
|
||||||
///
|
|
||||||
/// Get information about the latest backup version.
|
|
||||||
pub async fn get_latest_backup_info_route(
|
|
||||||
body: Ruma<get_latest_backup_info::v3::Request>,
|
|
||||||
) -> Result<get_latest_backup_info::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let (version, algorithm) = services()
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup(sender_user)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Key backup does not exist.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_latest_backup_info::v3::Response {
|
|
||||||
algorithm,
|
|
||||||
count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(),
|
|
||||||
etag: services().key_backups.get_etag(sender_user, &version)?,
|
|
||||||
version,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/version`
|
|
||||||
///
|
|
||||||
/// Get information about an existing backup.
|
|
||||||
pub async fn get_backup_info_route(
|
|
||||||
body: Ruma<get_backup_info::v3::Request>,
|
|
||||||
) -> Result<get_backup_info::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let algorithm = services()
|
|
||||||
.key_backups
|
|
||||||
.get_backup(sender_user, &body.version)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Key backup does not exist.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_backup_info::v3::Response {
|
|
||||||
algorithm,
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
version: body.version.to_owned(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
|
|
||||||
///
|
|
||||||
/// Delete an existing key backup.
|
|
||||||
///
|
|
||||||
/// - Deletes both information about the backup, as well as all key data related to the backup
|
|
||||||
pub async fn delete_backup_version_route(
|
|
||||||
body: Ruma<delete_backup_version::v3::Request>,
|
|
||||||
) -> Result<delete_backup_version::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.key_backups
|
|
||||||
.delete_backup(sender_user, &body.version)?;
|
|
||||||
|
|
||||||
Ok(delete_backup_version::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys`
|
|
||||||
///
|
|
||||||
/// Add the received backup keys to the database.
|
|
||||||
///
|
|
||||||
/// - Only manipulating the most recently created version of the backup is allowed
|
|
||||||
/// - Adds the keys to the backup
|
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
|
||||||
pub async fn add_backup_keys_route(
|
|
||||||
body: Ruma<add_backup_keys::v3::Request>,
|
|
||||||
) -> Result<add_backup_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if Some(&body.version)
|
|
||||||
!= services()
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup_version(sender_user)?
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"You may only manipulate the most recently created version of the backup.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
for (room_id, room) in &body.rooms {
|
|
||||||
for (session_id, key_data) in &room.sessions {
|
|
||||||
services().key_backups.add_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
room_id,
|
|
||||||
session_id,
|
|
||||||
key_data,
|
|
||||||
)?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(add_backup_keys::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
|
|
||||||
///
|
|
||||||
/// Add the received backup keys to the database.
|
|
||||||
///
|
|
||||||
/// - Only manipulating the most recently created version of the backup is allowed
|
|
||||||
/// - Adds the keys to the backup
|
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
|
||||||
pub async fn add_backup_keys_for_room_route(
|
|
||||||
body: Ruma<add_backup_keys_for_room::v3::Request>,
|
|
||||||
) -> Result<add_backup_keys_for_room::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if Some(&body.version)
|
|
||||||
!= services()
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup_version(sender_user)?
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"You may only manipulate the most recently created version of the backup.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
for (session_id, key_data) in &body.sessions {
|
|
||||||
services().key_backups.add_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
&body.room_id,
|
|
||||||
session_id,
|
|
||||||
key_data,
|
|
||||||
)?
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(add_backup_keys_for_room::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
|
||||||
///
|
|
||||||
/// Add the received backup key to the database.
|
|
||||||
///
|
|
||||||
/// - Only manipulating the most recently created version of the backup is allowed
|
|
||||||
/// - Adds the keys to the backup
|
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
|
||||||
pub async fn add_backup_keys_for_session_route(
|
|
||||||
body: Ruma<add_backup_keys_for_session::v3::Request>,
|
|
||||||
) -> Result<add_backup_keys_for_session::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if Some(&body.version)
|
|
||||||
!= services()
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup_version(sender_user)?
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"You may only manipulate the most recently created version of the backup.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
services().key_backups.add_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
&body.room_id,
|
|
||||||
&body.session_id,
|
|
||||||
&body.session_data,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(add_backup_keys_for_session::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys`
|
|
||||||
///
|
|
||||||
/// Retrieves all keys from the backup.
|
|
||||||
pub async fn get_backup_keys_route(
|
|
||||||
body: Ruma<get_backup_keys::v3::Request>,
|
|
||||||
) -> Result<get_backup_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let rooms = services().key_backups.get_all(sender_user, &body.version)?;
|
|
||||||
|
|
||||||
Ok(get_backup_keys::v3::Response { rooms })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
|
|
||||||
///
|
|
||||||
/// Retrieves all keys from the backup for a given room.
|
|
||||||
pub async fn get_backup_keys_for_room_route(
|
|
||||||
body: Ruma<get_backup_keys_for_room::v3::Request>,
|
|
||||||
) -> Result<get_backup_keys_for_room::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let sessions = services()
|
|
||||||
.key_backups
|
|
||||||
.get_room(sender_user, &body.version, &body.room_id)?;
|
|
||||||
|
|
||||||
Ok(get_backup_keys_for_room::v3::Response { sessions })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
|
||||||
///
|
|
||||||
/// Retrieves a key from the backup.
|
|
||||||
pub async fn get_backup_keys_for_session_route(
|
|
||||||
body: Ruma<get_backup_keys_for_session::v3::Request>,
|
|
||||||
) -> Result<get_backup_keys_for_session::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let key_data = services()
|
|
||||||
.key_backups
|
|
||||||
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Backup key not found for this user's session.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_backup_keys_for_session::v3::Response { key_data })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys`
|
|
||||||
///
|
|
||||||
/// Delete the keys from the backup.
|
|
||||||
pub async fn delete_backup_keys_route(
|
|
||||||
body: Ruma<delete_backup_keys::v3::Request>,
|
|
||||||
) -> Result<delete_backup_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.key_backups
|
|
||||||
.delete_all_keys(sender_user, &body.version)?;
|
|
||||||
|
|
||||||
Ok(delete_backup_keys::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
|
|
||||||
///
|
|
||||||
/// Delete the keys from the backup for a given room.
|
|
||||||
pub async fn delete_backup_keys_for_room_route(
|
|
||||||
body: Ruma<delete_backup_keys_for_room::v3::Request>,
|
|
||||||
) -> Result<delete_backup_keys_for_room::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.key_backups
|
|
||||||
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
|
|
||||||
|
|
||||||
Ok(delete_backup_keys_for_room::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
|
||||||
///
|
|
||||||
/// Delete a key from the backup.
|
|
||||||
pub async fn delete_backup_keys_for_session_route(
|
|
||||||
body: Ruma<delete_backup_keys_for_session::v3::Request>,
|
|
||||||
) -> Result<delete_backup_keys_for_session::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services().key_backups.delete_room_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
&body.room_id,
|
|
||||||
&body.session_id,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(delete_backup_keys_for_session::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,28 +0,0 @@
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
use ruma::api::client::discovery::get_capabilities::{
|
|
||||||
self, Capabilities, RoomVersionStability, RoomVersionsCapability,
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/capabilities`
|
|
||||||
///
|
|
||||||
/// Get information on the supported feature set and other relevent capabilities of this server.
|
|
||||||
pub async fn get_capabilities_route(
|
|
||||||
_body: Ruma<get_capabilities::v3::Request>,
|
|
||||||
) -> Result<get_capabilities::v3::Response> {
|
|
||||||
let mut available = BTreeMap::new();
|
|
||||||
for room_version in &services().globals.unstable_room_versions {
|
|
||||||
available.insert(room_version.clone(), RoomVersionStability::Unstable);
|
|
||||||
}
|
|
||||||
for room_version in &services().globals.stable_room_versions {
|
|
||||||
available.insert(room_version.clone(), RoomVersionStability::Stable);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut capabilities = Capabilities::new();
|
|
||||||
capabilities.room_versions = RoomVersionsCapability {
|
|
||||||
default: services().globals.default_room_version(),
|
|
||||||
available,
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(get_capabilities::v3::Response { capabilities })
|
|
||||||
}
|
|
|
@ -1,209 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
|
|
||||||
events::StateEventType,
|
|
||||||
};
|
|
||||||
use std::collections::HashSet;
|
|
||||||
use tracing::error;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
///
/// Allows loading room history around an event.
///
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
/// joined, depending on history_visibility)
pub async fn get_context_route(
    body: Ruma<get_context::v3::Request>,
) -> Result<get_context::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

    // Lazy-loading settings come from the request filter; `send_redundant`
    // forces member events to be re-sent even if this device saw them before.
    let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options {
        LazyLoadOptions::Enabled {
            include_redundant_members,
        } => (true, *include_redundant_members),
        _ => (false, false),
    };

    // Senders whose m.room.member state must be included in `state`.
    let mut lazy_loaded = HashSet::new();

    let base_token = services()
        .rooms
        .timeline
        .get_pdu_count(&body.event_id)?
        .ok_or(Error::BadRequest(
            ErrorKind::NotFound,
            "Base event id not found.",
        ))?;

    let base_event =
        services()
            .rooms
            .timeline
            .get_pdu(&body.event_id)?
            .ok_or(Error::BadRequest(
                ErrorKind::NotFound,
                "Base event not found.",
            ))?;

    let room_id = base_event.room_id.clone();

    // Visibility check on the anchor event before doing any further work.
    if !services()
        .rooms
        .state_accessor
        .user_can_see_event(sender_user, &room_id, &body.event_id)?
    {
        return Err(Error::BadRequest(
            ErrorKind::forbidden(),
            "You don't have permission to view this event.",
        ));
    }

    if !services().rooms.lazy_loading.lazy_load_was_sent_before(
        sender_user,
        sender_device,
        &room_id,
        &base_event.sender,
    )? || lazy_load_send_redundant
    {
        lazy_loaded.insert(base_event.sender.as_str().to_owned());
    }

    // Use limit with maximum 100
    let limit = u64::from(body.limit).min(100) as usize;

    let base_event = base_event.to_room_event();

    // Events before the anchor, filtered down to what the user may see.
    let events_before: Vec<_> = services()
        .rooms
        .timeline
        .pdus_until(sender_user, &room_id, base_token)?
        .take(limit / 2)
        .filter_map(|r| r.ok()) // Remove buggy events
        .filter(|(_, pdu)| {
            services()
                .rooms
                .state_accessor
                .user_can_see_event(sender_user, &room_id, &pdu.event_id)
                .unwrap_or(false)
        })
        .collect();

    for (_, event) in &events_before {
        if !services().rooms.lazy_loading.lazy_load_was_sent_before(
            sender_user,
            sender_device,
            &room_id,
            &event.sender,
        )? || lazy_load_send_redundant
        {
            lazy_loaded.insert(event.sender.as_str().to_owned());
        }
    }

    // `pdus_until` walks backwards, so the last element is the oldest event
    // and therefore the pagination start token.
    let start_token = events_before
        .last()
        .map(|(count, _)| count.stringify())
        .unwrap_or_else(|| base_token.stringify());

    let events_before: Vec<_> = events_before
        .into_iter()
        .map(|(_, pdu)| pdu.to_room_event())
        .collect();

    // Events after the anchor, same visibility filtering as above.
    let events_after: Vec<_> = services()
        .rooms
        .timeline
        .pdus_after(sender_user, &room_id, base_token)?
        .take(limit / 2)
        .filter_map(|r| r.ok()) // Remove buggy events
        .filter(|(_, pdu)| {
            services()
                .rooms
                .state_accessor
                .user_can_see_event(sender_user, &room_id, &pdu.event_id)
                .unwrap_or(false)
        })
        .collect();

    for (_, event) in &events_after {
        if !services().rooms.lazy_loading.lazy_load_was_sent_before(
            sender_user,
            sender_device,
            &room_id,
            &event.sender,
        )? || lazy_load_send_redundant
        {
            lazy_loaded.insert(event.sender.as_str().to_owned());
        }
    }

    // Room state is taken at the newest returned event (falling back to the
    // anchor, then to the room's current state if no snapshot is stored).
    let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash(
        events_after
            .last()
            .map_or(&*body.event_id, |(_, e)| &*e.event_id),
    )? {
        Some(s) => s,
        None => services()
            .rooms
            .state
            .get_room_shortstatehash(&room_id)?
            .expect("All rooms have state"),
    };

    let state_ids = services()
        .rooms
        .state_accessor
        .state_full_ids(shortstatehash)
        .await?;

    let end_token = events_after
        .last()
        .map(|(count, _)| count.stringify())
        .unwrap_or_else(|| base_token.stringify());

    let events_after: Vec<_> = events_after
        .into_iter()
        .map(|(_, pdu)| pdu.to_room_event())
        .collect();

    let mut state = Vec::new();

    for (shortstatekey, id) in state_ids {
        let (event_type, state_key) = services()
            .rooms
            .short
            .get_statekey_from_short(shortstatekey)?;

        // Non-member state is always included; member events only when lazy
        // loading is off or the sender appears in the returned timeline.
        if event_type != StateEventType::RoomMember {
            let pdu = match services().rooms.timeline.get_pdu(&id)? {
                Some(pdu) => pdu,
                None => {
                    error!("Pdu in state not found: {}", id);
                    continue;
                }
            };
            state.push(pdu.to_state_event());
        } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
            let pdu = match services().rooms.timeline.get_pdu(&id)? {
                Some(pdu) => pdu,
                None => {
                    error!("Pdu in state not found: {}", id);
                    continue;
                }
            };
            state.push(pdu.to_state_event());
        }
    }

    let resp = get_context::v3::Response {
        start: Some(start_token),
        end: Some(end_token),
        events_before,
        event: Some(base_event),
        events_after,
        state,
    };

    Ok(resp)
}
|
|
|
@ -1,169 +0,0 @@
|
||||||
use crate::{services, utils, Error, Result, Ruma};
|
|
||||||
use ruma::api::client::{
|
|
||||||
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
|
|
||||||
error::ErrorKind,
|
|
||||||
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::SESSION_ID_LENGTH;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/devices`
|
|
||||||
///
|
|
||||||
/// Get metadata on all devices of the sender user.
|
|
||||||
pub async fn get_devices_route(
|
|
||||||
body: Ruma<get_devices::v3::Request>,
|
|
||||||
) -> Result<get_devices::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let devices: Vec<device::Device> = services()
|
|
||||||
.users
|
|
||||||
.all_devices_metadata(sender_user)
|
|
||||||
.filter_map(|r| r.ok()) // Filter out buggy devices
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
Ok(get_devices::v3::Response { devices })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/devices/{deviceId}`
|
|
||||||
///
|
|
||||||
/// Get metadata on a single device of the sender user.
|
|
||||||
pub async fn get_device_route(
|
|
||||||
body: Ruma<get_device::v3::Request>,
|
|
||||||
) -> Result<get_device::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let device = services()
|
|
||||||
.users
|
|
||||||
.get_device_metadata(sender_user, &body.body.device_id)?
|
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
|
||||||
|
|
||||||
Ok(get_device::v3::Response { device })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
|
|
||||||
///
|
|
||||||
/// Updates the metadata on a given device of the sender user.
|
|
||||||
pub async fn update_device_route(
|
|
||||||
body: Ruma<update_device::v3::Request>,
|
|
||||||
) -> Result<update_device::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut device = services()
|
|
||||||
.users
|
|
||||||
.get_device_metadata(sender_user, &body.device_id)?
|
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
|
||||||
|
|
||||||
device.display_name.clone_from(&body.display_name);
|
|
||||||
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.update_device_metadata(sender_user, &body.device_id, &device)?;
|
|
||||||
|
|
||||||
Ok(update_device::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/devices/{deviceId}`
///
/// Deletes the given device.
///
/// - Requires UIAA to verify user password
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
pub async fn delete_device_route(
    body: Ruma<delete_device::v3::Request>,
) -> Result<delete_device::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

    // UIAA
    // A single password stage is required to authorize the deletion.
    let mut uiaainfo = UiaaInfo {
        flows: vec![AuthFlow {
            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
        session: None,
        auth_error: None,
    };

    if let Some(auth) = &body.auth {
        // Client supplied auth data: validate it against the UIAA session.
        let (worked, uiaainfo) =
            services()
                .uiaa
                .try_auth(sender_user, sender_device, auth, &uiaainfo)?;
        if !worked {
            return Err(Error::Uiaa(uiaainfo));
        }
        // Success!
    } else if let Some(json) = body.json_body {
        // No auth yet: open a fresh UIAA session and ask the client to authenticate.
        uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
        services()
            .uiaa
            .create(sender_user, sender_device, &uiaainfo, &json)?;
        return Err(Error::Uiaa(uiaainfo));
    } else {
        return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
    }

    // Auth succeeded; actually remove the device.
    services()
        .users
        .remove_device(sender_user, &body.device_id)?;

    Ok(delete_device::v3::Response {})
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/delete_devices`
///
/// Deletes the given list of devices.
///
/// - Requires UIAA to verify user password
///
/// For each device:
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
pub async fn delete_devices_route(
    body: Ruma<delete_devices::v3::Request>,
) -> Result<delete_devices::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

    // UIAA
    // A single password stage is required to authorize the deletion.
    let mut uiaainfo = UiaaInfo {
        flows: vec![AuthFlow {
            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
        session: None,
        auth_error: None,
    };

    if let Some(auth) = &body.auth {
        // Client supplied auth data: validate it against the UIAA session.
        let (worked, uiaainfo) =
            services()
                .uiaa
                .try_auth(sender_user, sender_device, auth, &uiaainfo)?;
        if !worked {
            return Err(Error::Uiaa(uiaainfo));
        }
        // Success!
    } else if let Some(json) = body.json_body {
        // No auth yet: open a fresh UIAA session and ask the client to authenticate.
        uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
        services()
            .uiaa
            .create(sender_user, sender_device, &uiaainfo, &json)?;
        return Err(Error::Uiaa(uiaainfo));
    } else {
        return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
    }

    // Auth succeeded; remove every requested device.
    for device_id in &body.devices {
        services().users.remove_device(sender_user, device_id)?
    }

    Ok(delete_devices::v3::Response {})
}
|
|
|
@ -1,369 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::{
|
|
||||||
client::{
|
|
||||||
directory::{
|
|
||||||
get_public_rooms, get_public_rooms_filtered, get_room_visibility,
|
|
||||||
set_room_visibility,
|
|
||||||
},
|
|
||||||
error::ErrorKind,
|
|
||||||
room,
|
|
||||||
},
|
|
||||||
federation,
|
|
||||||
},
|
|
||||||
directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork},
|
|
||||||
events::{
|
|
||||||
room::{
|
|
||||||
avatar::RoomAvatarEventContent,
|
|
||||||
canonical_alias::RoomCanonicalAliasEventContent,
|
|
||||||
create::RoomCreateEventContent,
|
|
||||||
guest_access::{GuestAccess, RoomGuestAccessEventContent},
|
|
||||||
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
|
||||||
join_rules::{JoinRule, RoomJoinRulesEventContent},
|
|
||||||
topic::RoomTopicEventContent,
|
|
||||||
},
|
|
||||||
StateEventType,
|
|
||||||
},
|
|
||||||
ServerName, UInt,
|
|
||||||
};
|
|
||||||
use tracing::{error, info, warn};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/publicRooms`
|
|
||||||
///
|
|
||||||
/// Lists the public rooms on this server.
|
|
||||||
///
|
|
||||||
/// - Rooms are ordered by the number of joined members
|
|
||||||
pub async fn get_public_rooms_filtered_route(
|
|
||||||
body: Ruma<get_public_rooms_filtered::v3::Request>,
|
|
||||||
) -> Result<get_public_rooms_filtered::v3::Response> {
|
|
||||||
get_public_rooms_filtered_helper(
|
|
||||||
body.server.as_deref(),
|
|
||||||
body.limit,
|
|
||||||
body.since.as_deref(),
|
|
||||||
&body.filter,
|
|
||||||
&body.room_network,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/publicRooms`
|
|
||||||
///
|
|
||||||
/// Lists the public rooms on this server.
|
|
||||||
///
|
|
||||||
/// - Rooms are ordered by the number of joined members
|
|
||||||
pub async fn get_public_rooms_route(
|
|
||||||
body: Ruma<get_public_rooms::v3::Request>,
|
|
||||||
) -> Result<get_public_rooms::v3::Response> {
|
|
||||||
let response = get_public_rooms_filtered_helper(
|
|
||||||
body.server.as_deref(),
|
|
||||||
body.limit,
|
|
||||||
body.since.as_deref(),
|
|
||||||
&Filter::default(),
|
|
||||||
&RoomNetwork::Matrix,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(get_public_rooms::v3::Response {
|
|
||||||
chunk: response.chunk,
|
|
||||||
prev_batch: response.prev_batch,
|
|
||||||
next_batch: response.next_batch,
|
|
||||||
total_room_count_estimate: response.total_room_count_estimate,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
|
|
||||||
///
|
|
||||||
/// Sets the visibility of a given room in the room directory.
|
|
||||||
///
|
|
||||||
/// - TODO: Access control checks
|
|
||||||
pub async fn set_room_visibility_route(
|
|
||||||
body: Ruma<set_room_visibility::v3::Request>,
|
|
||||||
) -> Result<set_room_visibility::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services().rooms.metadata.exists(&body.room_id)? {
|
|
||||||
// Return 404 if the room doesn't exist
|
|
||||||
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
|
|
||||||
}
|
|
||||||
|
|
||||||
match &body.visibility {
|
|
||||||
room::Visibility::Public => {
|
|
||||||
services().rooms.directory.set_public(&body.room_id)?;
|
|
||||||
info!("{} made {} public", sender_user, body.room_id);
|
|
||||||
}
|
|
||||||
room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?,
|
|
||||||
_ => {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Room visibility type is not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(set_room_visibility::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
|
|
||||||
///
|
|
||||||
/// Gets the visibility of a given room in the room directory.
|
|
||||||
pub async fn get_room_visibility_route(
|
|
||||||
body: Ruma<get_room_visibility::v3::Request>,
|
|
||||||
) -> Result<get_room_visibility::v3::Response> {
|
|
||||||
if !services().rooms.metadata.exists(&body.room_id)? {
|
|
||||||
// Return 404 if the room doesn't exist
|
|
||||||
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(get_room_visibility::v3::Response {
|
|
||||||
visibility: if services().rooms.directory.is_public_room(&body.room_id)? {
|
|
||||||
room::Visibility::Public
|
|
||||||
} else {
|
|
||||||
room::Visibility::Private
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Shared implementation behind the client and federation `publicRooms`
/// endpoints.
///
/// If `server` names a homeserver other than our own, the request is proxied
/// there over federation. Otherwise the local public room directory is
/// listed: a `PublicRoomsChunk` is built from each room's state events,
/// rooms matching `filter.generic_search_term` are kept, the result is
/// sorted by joined-member count (descending) and paginated via
/// `since`/`limit` ("n<offset>"/"p<offset>" tokens).
pub(crate) async fn get_public_rooms_filtered_helper(
    server: Option<&ServerName>,
    limit: Option<UInt>,
    since: Option<&str>,
    filter: &Filter,
    _network: &RoomNetwork,
) -> Result<get_public_rooms_filtered::v3::Response> {
    // Another server was requested: forward the query over federation and
    // return its answer verbatim.
    if let Some(other_server) =
        server.filter(|server| *server != services().globals.server_name().as_str())
    {
        let response = services()
            .sending
            .send_federation_request(
                other_server,
                federation::directory::get_public_rooms_filtered::v1::Request {
                    limit,
                    since: since.map(ToOwned::to_owned),
                    filter: Filter {
                        generic_search_term: filter.generic_search_term.clone(),
                        room_types: filter.room_types.clone(),
                    },
                    room_network: RoomNetwork::Matrix,
                },
            )
            .await?;

        return Ok(get_public_rooms_filtered::v3::Response {
            chunk: response.chunk,
            prev_batch: response.prev_batch,
            next_batch: response.next_batch,
            total_room_count_estimate: response.total_room_count_estimate,
        });
    }

    let limit = limit.map_or(10, u64::from);
    let mut num_since = 0_u64;

    // Decode the pagination token: 'n' prefixes a forward offset, 'p' a
    // backward one (the previous page starts `limit` entries earlier).
    if let Some(s) = &since {
        let mut characters = s.chars();
        let backwards = match characters.next() {
            Some('n') => false,
            Some('p') => true,
            _ => {
                return Err(Error::BadRequest(
                    ErrorKind::InvalidParam,
                    "Invalid `since` token",
                ))
            }
        };

        num_since = characters
            .collect::<String>()
            .parse()
            .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?;

        if backwards {
            num_since = num_since.saturating_sub(limit);
        }
    }

    // Build a directory chunk for every locally-listed public room by
    // reading its relevant state events from the database.
    let mut all_rooms: Vec<_> = services()
        .rooms
        .directory
        .public_rooms()
        .map(|room_id| {
            let room_id = room_id?;

            let chunk = PublicRoomsChunk {
                canonical_alias: services()
                    .rooms
                    .state_accessor
                    .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
                    .map_or(Ok(None), |s| {
                        serde_json::from_str(s.content.get())
                            .map(|c: RoomCanonicalAliasEventContent| c.alias)
                            .map_err(|_| {
                                Error::bad_database("Invalid canonical alias event in database.")
                            })
                    })?,
                name: services().rooms.state_accessor.get_name(&room_id)?,
                num_joined_members: services()
                    .rooms
                    .state_cache
                    .room_joined_count(&room_id)?
                    .unwrap_or_else(|| {
                        warn!("Room {} has no member count", room_id);
                        0
                    })
                    .try_into()
                    .expect("user count should not be that big"),
                topic: services()
                    .rooms
                    .state_accessor
                    .room_state_get(&room_id, &StateEventType::RoomTopic, "")?
                    .map_or(Ok(None), |s| {
                        serde_json::from_str(s.content.get())
                            .map(|c: RoomTopicEventContent| Some(c.topic))
                            .map_err(|_| {
                                error!("Invalid room topic event in database for room {}", room_id);
                                Error::bad_database("Invalid room topic event in database.")
                            })
                    })?,
                world_readable: services()
                    .rooms
                    .state_accessor
                    .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
                    .map_or(Ok(false), |s| {
                        serde_json::from_str(s.content.get())
                            .map(|c: RoomHistoryVisibilityEventContent| {
                                c.history_visibility == HistoryVisibility::WorldReadable
                            })
                            .map_err(|_| {
                                Error::bad_database(
                                    "Invalid room history visibility event in database.",
                                )
                            })
                    })?,
                guest_can_join: services()
                    .rooms
                    .state_accessor
                    .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
                    .map_or(Ok(false), |s| {
                        serde_json::from_str(s.content.get())
                            .map(|c: RoomGuestAccessEventContent| {
                                c.guest_access == GuestAccess::CanJoin
                            })
                            .map_err(|_| {
                                Error::bad_database("Invalid room guest access event in database.")
                            })
                    })?,
                avatar_url: services()
                    .rooms
                    .state_accessor
                    .room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
                    .map(|s| {
                        serde_json::from_str(s.content.get())
                            .map(|c: RoomAvatarEventContent| c.url)
                            .map_err(|_| {
                                Error::bad_database("Invalid room avatar event in database.")
                            })
                    })
                    .transpose()?
                    // url is now an Option<String> so we must flatten
                    .flatten(),
                join_rule: services()
                    .rooms
                    .state_accessor
                    .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
                    .map(|s| {
                        serde_json::from_str(s.content.get())
                            .map(|c: RoomJoinRulesEventContent| match c.join_rule {
                                JoinRule::Public => Some(PublicRoomJoinRule::Public),
                                JoinRule::Knock => Some(PublicRoomJoinRule::Knock),
                                // Other join rules have no public-directory
                                // representation; the room is dropped below.
                                _ => None,
                            })
                            .map_err(|e| {
                                error!("Invalid room join rule event in database: {}", e);
                                Error::BadDatabase("Invalid room join rule event in database.")
                            })
                    })
                    .transpose()?
                    .flatten()
                    .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?,
                room_type: services()
                    .rooms
                    .state_accessor
                    .room_state_get(&room_id, &StateEventType::RoomCreate, "")?
                    .map(|s| {
                        serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(
                            |e| {
                                error!("Invalid room create event in database: {}", e);
                                Error::BadDatabase("Invalid room create event in database.")
                            },
                        )
                    })
                    .transpose()?
                    .and_then(|e| e.room_type),
                room_id,
            };
            Ok(chunk)
        })
        .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms
        .filter(|chunk| {
            // Case-insensitive substring match of the search term against
            // name, topic, and canonical alias.
            if let Some(query) = filter
                .generic_search_term
                .as_ref()
                .map(|q| q.to_lowercase())
            {
                if let Some(name) = &chunk.name {
                    if name.as_str().to_lowercase().contains(&query) {
                        return true;
                    }
                }

                if let Some(topic) = &chunk.topic {
                    if topic.to_lowercase().contains(&query) {
                        return true;
                    }
                }

                if let Some(canonical_alias) = &chunk.canonical_alias {
                    if canonical_alias.as_str().to_lowercase().contains(&query) {
                        return true;
                    }
                }

                false
            } else {
                // No search term
                true
            }
        })
        // We need to collect all, so we can sort by member count
        .collect();

    // Most-populated rooms first.
    all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));

    let total_room_count_estimate = (all_rooms.len() as u32).into();

    let chunk: Vec<_> = all_rooms
        .into_iter()
        .skip(num_since as usize)
        .take(limit as usize)
        .collect();

    let prev_batch = if num_since == 0 {
        None
    } else {
        Some(format!("p{num_since}"))
    };

    // A short page means we ran out of rooms, so there is no next page.
    let next_batch = if chunk.len() < limit as usize {
        None
    } else {
        Some(format!("n{}", num_since + limit))
    };

    Ok(get_public_rooms_filtered::v3::Response {
        chunk,
        prev_batch,
        next_batch,
        total_room_count_estimate: Some(total_room_count_estimate),
    })
}
|
|
|
@ -1,34 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
filter::{create_filter, get_filter},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
|
|
||||||
///
|
|
||||||
/// Loads a filter that was previously created.
|
|
||||||
///
|
|
||||||
/// - A user can only access their own filters
|
|
||||||
pub async fn get_filter_route(
|
|
||||||
body: Ruma<get_filter::v3::Request>,
|
|
||||||
) -> Result<get_filter::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let filter = match services().users.get_filter(sender_user, &body.filter_id)? {
|
|
||||||
Some(filter) => filter,
|
|
||||||
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(get_filter::v3::Response::new(filter))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/filter`
|
|
||||||
///
|
|
||||||
/// Creates a new filter to be used by other endpoints.
|
|
||||||
pub async fn create_filter_route(
|
|
||||||
body: Ruma<create_filter::v3::Request>,
|
|
||||||
) -> Result<create_filter::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
Ok(create_filter::v3::Response::new(
|
|
||||||
services().users.create_filter(sender_user, &body.filter)?,
|
|
||||||
))
|
|
||||||
}
|
|
|
@ -1,536 +0,0 @@
|
||||||
use super::SESSION_ID_LENGTH;
|
|
||||||
use crate::{services, utils, Error, Result, Ruma};
|
|
||||||
use futures_util::{stream::FuturesUnordered, StreamExt};
|
|
||||||
use ruma::{
|
|
||||||
api::{
|
|
||||||
client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
keys::{
|
|
||||||
claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
|
|
||||||
upload_signing_keys,
|
|
||||||
},
|
|
||||||
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
|
||||||
},
|
|
||||||
federation,
|
|
||||||
},
|
|
||||||
serde::Raw,
|
|
||||||
DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
|
|
||||||
};
|
|
||||||
use serde_json::json;
|
|
||||||
use std::{
|
|
||||||
collections::{hash_map, BTreeMap, HashMap, HashSet},
|
|
||||||
time::{Duration, Instant},
|
|
||||||
};
|
|
||||||
use tracing::debug;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/upload`
|
|
||||||
///
|
|
||||||
/// Publish end-to-end encryption keys for the sender device.
|
|
||||||
///
|
|
||||||
/// - Adds one time keys
|
|
||||||
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
|
|
||||||
pub async fn upload_keys_route(
|
|
||||||
body: Ruma<upload_keys::v3::Request>,
|
|
||||||
) -> Result<upload_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
for (key_key, key_value) in &body.one_time_keys {
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.add_one_time_key(sender_user, sender_device, key_key, key_value)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(device_keys) = &body.device_keys {
|
|
||||||
// TODO: merge this and the existing event?
|
|
||||||
// This check is needed to assure that signatures are kept
|
|
||||||
if services()
|
|
||||||
.users
|
|
||||||
.get_device_keys(sender_user, sender_device)?
|
|
||||||
.is_none()
|
|
||||||
{
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.add_device_keys(sender_user, sender_device, device_keys)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(upload_keys::v3::Response {
|
|
||||||
one_time_key_counts: services()
|
|
||||||
.users
|
|
||||||
.count_one_time_keys(sender_user, sender_device)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/query`
|
|
||||||
///
|
|
||||||
/// Get end-to-end encryption keys for the given users.
|
|
||||||
///
|
|
||||||
/// - Always fetches users from other servers over federation
|
|
||||||
/// - Gets master keys, self-signing keys, user signing keys and device keys.
|
|
||||||
/// - The master and self-signing keys contain signatures that the user is allowed to see
|
|
||||||
pub async fn get_keys_route(body: Ruma<get_keys::v3::Request>) -> Result<get_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let response =
|
|
||||||
get_keys_helper(Some(sender_user), &body.device_keys, |u| u == sender_user).await?;
|
|
||||||
|
|
||||||
Ok(response)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/claim`
|
|
||||||
///
|
|
||||||
/// Claims one-time keys
|
|
||||||
pub async fn claim_keys_route(
|
|
||||||
body: Ruma<claim_keys::v3::Request>,
|
|
||||||
) -> Result<claim_keys::v3::Response> {
|
|
||||||
let response = claim_keys_helper(&body.one_time_keys).await?;
|
|
||||||
|
|
||||||
Ok(response)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/device_signing/upload`
///
/// Uploads end-to-end key information for the sender user.
///
/// - Requires UIAA to verify password
pub async fn upload_signing_keys_route(
    body: Ruma<upload_signing_keys::v3::Request>,
) -> Result<upload_signing_keys::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

    // UIAA: uploading cross-signing keys is sensitive, so the user must
    // re-authenticate with their password first.
    let mut uiaainfo = UiaaInfo {
        flows: vec![AuthFlow {
            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
        session: None,
        auth_error: None,
    };

    if let Some(auth) = &body.auth {
        // Auth data supplied: validate it against the pending UIAA session.
        // Note the shadowed `uiaainfo` — it is the server's updated session
        // state, not the template built above.
        let (worked, uiaainfo) =
            services()
                .uiaa
                .try_auth(sender_user, sender_device, auth, &uiaainfo)?;
        if !worked {
            return Err(Error::Uiaa(uiaainfo));
        }
        // Success!
    } else if let Some(json) = body.json_body {
        // First request without auth data: open a new UIAA session and tell
        // the client which auth stages are required.
        uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
        services()
            .uiaa
            .create(sender_user, sender_device, &uiaainfo, &json)?;
        return Err(Error::Uiaa(uiaainfo));
    } else {
        return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
    }

    // Self-signing and user-signing keys are only stored together with a
    // master key; a request without one is a no-op.
    if let Some(master_key) = &body.master_key {
        services().users.add_cross_signing_keys(
            sender_user,
            master_key,
            &body.self_signing_key,
            &body.user_signing_key,
            true, // notify so that other users see the new keys
        )?;
    }

    Ok(upload_signing_keys::v3::Response {})
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/signatures/upload`
///
/// Uploads end-to-end key signatures from the sender user.
pub async fn upload_signatures_route(
    body: Ruma<upload_signatures::v3::Request>,
) -> Result<upload_signatures::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    for (user_id, keys) in &body.signed_keys {
        for (key_id, key) in keys {
            // Convert the key to a generic JSON value so we can navigate its
            // `signatures` object without a typed model.
            let key = serde_json::to_value(key)
                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?;

            // Walk `key.signatures[sender_user]`; only signatures made by
            // the sender are accepted. Each entry is `(signing key id,
            // signature string)`.
            for signature in key
                .get("signatures")
                .ok_or(Error::BadRequest(
                    ErrorKind::InvalidParam,
                    "Missing signatures field.",
                ))?
                .get(sender_user.to_string())
                .ok_or(Error::BadRequest(
                    ErrorKind::InvalidParam,
                    "Invalid user in signatures field.",
                ))?
                .as_object()
                .ok_or(Error::BadRequest(
                    ErrorKind::InvalidParam,
                    "Invalid signature.",
                ))?
                .clone()
                .into_iter()
            {
                // Signature validation?
                // NOTE(review): the signature bytes are stored without being
                // cryptographically verified here — confirm whether `sign_key`
                // or a later consumer validates them.
                let signature = (
                    signature.0,
                    signature
                        .1
                        .as_str()
                        .ok_or(Error::BadRequest(
                            ErrorKind::InvalidParam,
                            "Invalid signature value.",
                        ))?
                        .to_owned(),
                );
                services()
                    .users
                    .sign_key(user_id, key_id, signature, sender_user)?;
            }
        }
    }

    Ok(upload_signatures::v3::Response {
        failures: BTreeMap::new(), // TODO: integrate
    })
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/changes`
|
|
||||||
///
|
|
||||||
/// Gets a list of users who have updated their device identity keys since the previous sync token.
|
|
||||||
///
|
|
||||||
/// - TODO: left users
|
|
||||||
pub async fn get_key_changes_route(
|
|
||||||
body: Ruma<get_key_changes::v3::Request>,
|
|
||||||
) -> Result<get_key_changes::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut device_list_updates = HashSet::new();
|
|
||||||
|
|
||||||
device_list_updates.extend(
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.keys_changed(
|
|
||||||
sender_user.as_str(),
|
|
||||||
body.from
|
|
||||||
.parse()
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?,
|
|
||||||
Some(
|
|
||||||
body.to
|
|
||||||
.parse()
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
.filter_map(|r| r.ok()),
|
|
||||||
);
|
|
||||||
|
|
||||||
for room_id in services()
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_joined(sender_user)
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
{
|
|
||||||
device_list_updates.extend(
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.keys_changed(
|
|
||||||
room_id.as_ref(),
|
|
||||||
body.from.parse().map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")
|
|
||||||
})?,
|
|
||||||
Some(body.to.parse().map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.")
|
|
||||||
})?),
|
|
||||||
)
|
|
||||||
.filter_map(|r| r.ok()),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Ok(get_key_changes::v3::Response {
|
|
||||||
changed: device_list_updates.into_iter().collect(),
|
|
||||||
left: Vec::new(), // TODO
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Collects device keys and cross-signing keys for the requested users.
///
/// Local users are answered from the database; remote users are batched per
/// server and queried over federation, with exponential backoff applied to
/// servers whose previous queries failed. `allowed_signatures` decides which
/// signatures on master/self-signing keys the requester may see.
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
    sender_user: Option<&UserId>,
    device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
    allowed_signatures: F,
) -> Result<get_keys::v3::Response> {
    let mut master_keys = BTreeMap::new();
    let mut self_signing_keys = BTreeMap::new();
    let mut user_signing_keys = BTreeMap::new();
    let mut device_keys = BTreeMap::new();

    // server name -> list of (user, requested device ids) to fetch remotely.
    let mut get_over_federation = HashMap::new();

    for (user_id, device_ids) in device_keys_input {
        let user_id: &UserId = user_id;

        // Remote users are collected here and fetched over federation below.
        if user_id.server_name() != services().globals.server_name() {
            get_over_federation
                .entry(user_id.server_name())
                .or_insert_with(Vec::new)
                .push((user_id, device_ids));
            continue;
        }

        if device_ids.is_empty() {
            // An empty device list means "all devices of this user".
            let mut container = BTreeMap::new();
            for device_id in services().users.all_device_ids(user_id) {
                let device_id = device_id?;
                if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? {
                    let metadata = services()
                        .users
                        .get_device_metadata(user_id, &device_id)?
                        .ok_or_else(|| {
                            Error::bad_database("all_device_keys contained nonexistent device.")
                        })?;

                    add_unsigned_device_display_name(&mut keys, metadata)
                        .map_err(|_| Error::bad_database("invalid device keys in database"))?;
                    container.insert(device_id, keys);
                }
            }
            device_keys.insert(user_id.to_owned(), container);
        } else {
            for device_id in device_ids {
                let mut container = BTreeMap::new();
                if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? {
                    let metadata = services()
                        .users
                        .get_device_metadata(user_id, device_id)?
                        .ok_or(Error::BadRequest(
                            ErrorKind::InvalidParam,
                            "Tried to get keys for nonexistent device.",
                        ))?;

                    add_unsigned_device_display_name(&mut keys, metadata)
                        .map_err(|_| Error::bad_database("invalid device keys in database"))?;
                    container.insert(device_id.to_owned(), keys);
                }
                device_keys.insert(user_id.to_owned(), container);
            }
        }

        if let Some(master_key) =
            services()
                .users
                .get_master_key(sender_user, user_id, &allowed_signatures)?
        {
            master_keys.insert(user_id.to_owned(), master_key);
        }
        if let Some(self_signing_key) =
            services()
                .users
                .get_self_signing_key(sender_user, user_id, &allowed_signatures)?
        {
            self_signing_keys.insert(user_id.to_owned(), self_signing_key);
        }
        // User-signing keys are only returned to their owner.
        if Some(user_id) == sender_user {
            if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? {
                user_signing_keys.insert(user_id.to_owned(), user_signing_key);
            }
        }
    }

    let mut failures = BTreeMap::new();

    // Record a failed query for `id` so future requests back off longer.
    let back_off = |id| async {
        match services()
            .globals
            .bad_query_ratelimiter
            .write()
            .await
            .entry(id)
        {
            hash_map::Entry::Vacant(e) => {
                e.insert((Instant::now(), 1));
            }
            hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
        }
    };

    // Fire one federation query per remote server, all concurrently.
    let mut futures: FuturesUnordered<_> = get_over_federation
        .into_iter()
        .map(|(server, vec)| async move {
            if let Some((time, tries)) = services()
                .globals
                .bad_query_ratelimiter
                .read()
                .await
                .get(server)
            {
                // Exponential backoff
                // 30s * tries^2, capped at 24 hours.
                let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
                if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
                    min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
                }

                if time.elapsed() < min_elapsed_duration {
                    debug!("Backing off query from {:?}", server);
                    return (
                        server,
                        Err(Error::BadServerResponse("bad query, still backing off")),
                    );
                }
            }

            let mut device_keys_input_fed = BTreeMap::new();
            for (user_id, keys) in vec {
                device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
            }
            (
                server,
                tokio::time::timeout(
                    Duration::from_secs(25),
                    services().sending.send_federation_request(
                        server,
                        federation::keys::get_keys::v1::Request {
                            device_keys: device_keys_input_fed,
                        },
                    ),
                )
                .await
                .map_err(|_e| Error::BadServerResponse("Query took too long")),
            )
        })
        .collect();

    while let Some((server, response)) = futures.next().await {
        match response {
            Ok(Ok(response)) => {
                for (user, masterkey) in response.master_keys {
                    let (master_key_id, mut master_key) =
                        services().users.parse_master_key(&user, &masterkey)?;

                    // Merge signatures we already have on this master key into
                    // the freshly fetched copy so none are lost.
                    if let Some(our_master_key) = services().users.get_key(
                        &master_key_id,
                        sender_user,
                        &user,
                        &allowed_signatures,
                    )? {
                        let (_, our_master_key) =
                            services().users.parse_master_key(&user, &our_master_key)?;
                        master_key.signatures.extend(our_master_key.signatures);
                    }
                    let json = serde_json::to_value(master_key).expect("to_value always works");
                    let raw = serde_json::from_value(json).expect("Raw::from_value always works");
                    services().users.add_cross_signing_keys(
                        &user, &raw, &None, &None,
                        false, // Dont notify. A notification would trigger another key request resulting in an endless loop
                    )?;
                    master_keys.insert(user, raw);
                }

                self_signing_keys.extend(response.self_signing_keys);
                device_keys.extend(response.device_keys);
            }
            _ => {
                // Timeout or federation error: back off and report the server
                // in `failures` so the client knows the data is incomplete.
                back_off(server.to_owned()).await;

                failures.insert(server.to_string(), json!({}));
            }
        }
    }

    Ok(get_keys::v3::Response {
        master_keys,
        self_signing_keys,
        user_signing_keys,
        device_keys,
        failures,
    })
}
|
|
||||||
|
|
||||||
fn add_unsigned_device_display_name(
|
|
||||||
keys: &mut Raw<ruma::encryption::DeviceKeys>,
|
|
||||||
metadata: ruma::api::client::device::Device,
|
|
||||||
) -> serde_json::Result<()> {
|
|
||||||
if let Some(display_name) = metadata.display_name {
|
|
||||||
let mut object = keys.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
|
|
||||||
|
|
||||||
let unsigned = object.entry("unsigned").or_insert_with(|| json!({}));
|
|
||||||
if let serde_json::Value::Object(unsigned_object) = unsigned {
|
|
||||||
unsigned_object.insert("device_display_name".to_owned(), display_name.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
*keys = Raw::from_json(serde_json::value::to_raw_value(&object)?);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Claims one-time keys for the requested user devices, serving local users
/// from our own store and fanning out to remote servers over federation for
/// everyone else.
///
/// Remote servers that fail to respond are reported in the `failures` map of
/// the response (keyed by server name) instead of failing the whole request.
pub(crate) async fn claim_keys_helper(
    one_time_keys_input: &BTreeMap<OwnedUserId, BTreeMap<OwnedDeviceId, DeviceKeyAlgorithm>>,
) -> Result<claim_keys::v3::Response> {
    let mut one_time_keys = BTreeMap::new();

    // Per-server batches of (user, device→algorithm) requests to forward.
    let mut get_over_federation = BTreeMap::new();

    for (user_id, map) in one_time_keys_input {
        if user_id.server_name() != services().globals.server_name() {
            get_over_federation
                .entry(user_id.server_name())
                .or_insert_with(Vec::new)
                .push((user_id, map));
        }

        // NOTE(review): this local-store lookup runs for remote users too and
        // inserts a (typically empty) container for them; the federation
        // results below then overwrite those entries via `extend`. Confirm
        // this is intentional rather than a missing `continue`.
        let mut container = BTreeMap::new();
        for (device_id, key_algorithm) in map {
            // `take_one_time_key` consumes the key from our store so it can
            // never be claimed twice.
            if let Some(one_time_keys) =
                services()
                    .users
                    .take_one_time_key(user_id, device_id, key_algorithm)?
            {
                let mut c = BTreeMap::new();
                c.insert(one_time_keys.0, one_time_keys.1);
                container.insert(device_id.clone(), c);
            }
        }
        one_time_keys.insert(user_id.clone(), container);
    }

    let mut failures = BTreeMap::new();

    // Issue all federation claims concurrently; results are processed in
    // completion order, not request order.
    let mut futures: FuturesUnordered<_> = get_over_federation
        .into_iter()
        .map(|(server, vec)| async move {
            let mut one_time_keys_input_fed = BTreeMap::new();
            for (user_id, keys) in vec {
                one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
            }
            (
                server,
                services()
                    .sending
                    .send_federation_request(
                        server,
                        federation::keys::claim_keys::v1::Request {
                            one_time_keys: one_time_keys_input_fed,
                        },
                    )
                    .await,
            )
        })
        .collect();

    while let Some((server, response)) = futures.next().await {
        match response {
            Ok(keys) => {
                one_time_keys.extend(keys.one_time_keys);
            }
            Err(_e) => {
                // Spec requires an entry per failing server; the value's shape
                // is unspecified, so an empty object is used.
                failures.insert(server.to_string(), json!({}));
            }
        }
    }

    Ok(claim_keys::v3::Response {
        failures,
        one_time_keys,
    })
}
|
|
|
@ -1,467 +0,0 @@
|
||||||
// Unauthenticated media is deprecated
|
|
||||||
#![allow(deprecated)]
|
|
||||||
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
|
|
||||||
use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE};
|
|
||||||
use ruma::{
|
|
||||||
api::{
|
|
||||||
client::{
|
|
||||||
authenticated_media::{
|
|
||||||
get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
|
|
||||||
},
|
|
||||||
error::ErrorKind,
|
|
||||||
media::{self, create_content},
|
|
||||||
},
|
|
||||||
federation::authenticated_media::{self as federation_media, FileOrLocation},
|
|
||||||
},
|
|
||||||
http_headers::{ContentDisposition, ContentDispositionType},
|
|
||||||
media::Method,
|
|
||||||
ServerName, UInt,
|
|
||||||
};
|
|
||||||
|
|
||||||
const MXC_LENGTH: usize = 32;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/media/r0/config`
|
|
||||||
///
|
|
||||||
/// Returns max upload size.
|
|
||||||
pub async fn get_media_config_route(
|
|
||||||
_body: Ruma<media::get_media_config::v3::Request>,
|
|
||||||
) -> Result<media::get_media_config::v3::Response> {
|
|
||||||
Ok(media::get_media_config::v3::Response {
|
|
||||||
upload_size: services().globals.max_request_size().into(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/v1/media/config`
|
|
||||||
///
|
|
||||||
/// Returns max upload size.
|
|
||||||
pub async fn get_media_config_auth_route(
|
|
||||||
_body: Ruma<get_media_config::v1::Request>,
|
|
||||||
) -> Result<get_media_config::v1::Response> {
|
|
||||||
Ok(get_media_config::v1::Response {
|
|
||||||
upload_size: services().globals.max_request_size().into(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/media/r0/upload`
|
|
||||||
///
|
|
||||||
/// Permanently save media in the server.
|
|
||||||
///
|
|
||||||
/// - Some metadata will be saved in the database
|
|
||||||
/// - Media will be saved in the media/ directory
|
|
||||||
pub async fn create_content_route(
|
|
||||||
body: Ruma<create_content::v3::Request>,
|
|
||||||
) -> Result<create_content::v3::Response> {
|
|
||||||
let mxc = format!(
|
|
||||||
"mxc://{}/{}",
|
|
||||||
services().globals.server_name(),
|
|
||||||
utils::random_string(MXC_LENGTH)
|
|
||||||
);
|
|
||||||
|
|
||||||
services()
|
|
||||||
.media
|
|
||||||
.create(
|
|
||||||
mxc.clone(),
|
|
||||||
Some(
|
|
||||||
ContentDisposition::new(ContentDispositionType::Inline)
|
|
||||||
.with_filename(body.filename.clone()),
|
|
||||||
),
|
|
||||||
body.content_type.as_deref(),
|
|
||||||
&body.file,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(create_content::v3::Response {
|
|
||||||
content_uri: mxc.into(),
|
|
||||||
blurhash: None,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fetches media from a remote homeserver and caches it in our local media
/// store under `mxc` before returning it.
///
/// Tries the authenticated federation media endpoint first; if the remote
/// server does not recognize that endpoint, falls back to the deprecated
/// unauthenticated media API.
pub async fn get_remote_content(
    mxc: &str,
    server_name: &ServerName,
    media_id: String,
) -> Result<get_content::v1::Response, Error> {
    let content_response = match services()
        .sending
        .send_federation_request(
            server_name,
            federation_media::get_content::v1::Request {
                media_id: media_id.clone(),
                timeout_ms: Duration::from_secs(20),
            },
        )
        .await
    {
        // Remote replied with the file bytes inline.
        Ok(federation_media::get_content::v1::Response {
            metadata: _,
            content: FileOrLocation::File(content),
        }) => get_content::v1::Response {
            file: content.file,
            content_type: content.content_type,
            content_disposition: content.content_disposition,
        },

        // Remote redirected us to a URL; download it from there.
        Ok(federation_media::get_content::v1::Response {
            metadata: _,
            content: FileOrLocation::Location(url),
        }) => get_location_content(url).await?,
        // `Unrecognized` means the remote does not implement the
        // authenticated media endpoint; retry via the legacy route.
        Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
            let media::get_content::v3::Response {
                file,
                content_type,
                content_disposition,
                ..
            } = services()
                .sending
                .send_federation_request(
                    server_name,
                    media::get_content::v3::Request {
                        server_name: server_name.to_owned(),
                        media_id,
                        timeout_ms: Duration::from_secs(20),
                        allow_remote: false,
                        allow_redirect: true,
                    },
                )
                .await?;

            get_content::v1::Response {
                file,
                content_type,
                content_disposition,
            }
        }
        // Any other error is propagated unchanged.
        Err(e) => return Err(e),
    };

    // Cache the fetched media locally so future requests are served without
    // another federation round-trip.
    services()
        .media
        .create(
            mxc.to_owned(),
            content_response.content_disposition.clone(),
            content_response.content_type.as_deref(),
            &content_response.file,
        )
        .await?;

    Ok(content_response)
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}`
|
|
||||||
///
|
|
||||||
/// Load media from our server or over federation.
|
|
||||||
///
|
|
||||||
/// - Only allows federation if `allow_remote` is true
|
|
||||||
pub async fn get_content_route(
|
|
||||||
body: Ruma<media::get_content::v3::Request>,
|
|
||||||
) -> Result<media::get_content::v3::Response> {
|
|
||||||
let get_content::v1::Response {
|
|
||||||
file,
|
|
||||||
content_disposition,
|
|
||||||
content_type,
|
|
||||||
} = get_content(&body.server_name, body.media_id.clone(), body.allow_remote).await?;
|
|
||||||
|
|
||||||
Ok(media::get_content::v3::Response {
|
|
||||||
file,
|
|
||||||
content_type,
|
|
||||||
content_disposition,
|
|
||||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
|
|
||||||
///
|
|
||||||
/// Load media from our server or over federation.
|
|
||||||
pub async fn get_content_auth_route(
|
|
||||||
body: Ruma<get_content::v1::Request>,
|
|
||||||
) -> Result<get_content::v1::Response> {
|
|
||||||
get_content(&body.server_name, body.media_id.clone(), true).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_content(
|
|
||||||
server_name: &ServerName,
|
|
||||||
media_id: String,
|
|
||||||
allow_remote: bool,
|
|
||||||
) -> Result<get_content::v1::Response, Error> {
|
|
||||||
let mxc = format!("mxc://{}/{}", server_name, media_id);
|
|
||||||
|
|
||||||
if let Ok(Some(FileMeta {
|
|
||||||
content_disposition,
|
|
||||||
content_type,
|
|
||||||
file,
|
|
||||||
})) = services().media.get(mxc.clone()).await
|
|
||||||
{
|
|
||||||
Ok(get_content::v1::Response {
|
|
||||||
file,
|
|
||||||
content_type,
|
|
||||||
content_disposition: Some(content_disposition),
|
|
||||||
})
|
|
||||||
} else if server_name != services().globals.server_name() && allow_remote {
|
|
||||||
let remote_content_response =
|
|
||||||
get_remote_content(&mxc, server_name, media_id.clone()).await?;
|
|
||||||
|
|
||||||
Ok(get_content::v1::Response {
|
|
||||||
content_disposition: remote_content_response.content_disposition,
|
|
||||||
content_type: remote_content_response.content_type,
|
|
||||||
file: remote_content_response.file,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}/{fileName}`
|
|
||||||
///
|
|
||||||
/// Load media from our server or over federation, permitting desired filename.
|
|
||||||
///
|
|
||||||
/// - Only allows federation if `allow_remote` is true
|
|
||||||
pub async fn get_content_as_filename_route(
|
|
||||||
body: Ruma<media::get_content_as_filename::v3::Request>,
|
|
||||||
) -> Result<media::get_content_as_filename::v3::Response> {
|
|
||||||
let get_content_as_filename::v1::Response {
|
|
||||||
file,
|
|
||||||
content_type,
|
|
||||||
content_disposition,
|
|
||||||
} = get_content_as_filename(
|
|
||||||
&body.server_name,
|
|
||||||
body.media_id.clone(),
|
|
||||||
body.filename.clone(),
|
|
||||||
body.allow_remote,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(media::get_content_as_filename::v3::Response {
|
|
||||||
file,
|
|
||||||
content_type,
|
|
||||||
content_disposition,
|
|
||||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
|
|
||||||
///
|
|
||||||
/// Load media from our server or over federation, permitting desired filename.
|
|
||||||
pub async fn get_content_as_filename_auth_route(
|
|
||||||
body: Ruma<get_content_as_filename::v1::Request>,
|
|
||||||
) -> Result<get_content_as_filename::v1::Response, Error> {
|
|
||||||
get_content_as_filename(
|
|
||||||
&body.server_name,
|
|
||||||
body.media_id.clone(),
|
|
||||||
body.filename.clone(),
|
|
||||||
true,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_content_as_filename(
|
|
||||||
server_name: &ServerName,
|
|
||||||
media_id: String,
|
|
||||||
filename: String,
|
|
||||||
allow_remote: bool,
|
|
||||||
) -> Result<get_content_as_filename::v1::Response, Error> {
|
|
||||||
let mxc = format!("mxc://{}/{}", server_name, media_id);
|
|
||||||
|
|
||||||
if let Ok(Some(FileMeta {
|
|
||||||
file, content_type, ..
|
|
||||||
})) = services().media.get(mxc.clone()).await
|
|
||||||
{
|
|
||||||
Ok(get_content_as_filename::v1::Response {
|
|
||||||
file,
|
|
||||||
content_type,
|
|
||||||
content_disposition: Some(
|
|
||||||
ContentDisposition::new(ContentDispositionType::Inline)
|
|
||||||
.with_filename(Some(filename.clone())),
|
|
||||||
),
|
|
||||||
})
|
|
||||||
} else if server_name != services().globals.server_name() && allow_remote {
|
|
||||||
let remote_content_response =
|
|
||||||
get_remote_content(&mxc, server_name, media_id.clone()).await?;
|
|
||||||
|
|
||||||
Ok(get_content_as_filename::v1::Response {
|
|
||||||
content_disposition: Some(
|
|
||||||
ContentDisposition::new(ContentDispositionType::Inline)
|
|
||||||
.with_filename(Some(filename.clone())),
|
|
||||||
),
|
|
||||||
content_type: remote_content_response.content_type,
|
|
||||||
file: remote_content_response.file,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/media/r0/thumbnail/{serverName}/{mediaId}`
|
|
||||||
///
|
|
||||||
/// Load media thumbnail from our server or over federation.
|
|
||||||
///
|
|
||||||
/// - Only allows federation if `allow_remote` is true
|
|
||||||
pub async fn get_content_thumbnail_route(
|
|
||||||
body: Ruma<media::get_content_thumbnail::v3::Request>,
|
|
||||||
) -> Result<media::get_content_thumbnail::v3::Response> {
|
|
||||||
let get_content_thumbnail::v1::Response { file, content_type } = get_content_thumbnail(
|
|
||||||
&body.server_name,
|
|
||||||
body.media_id.clone(),
|
|
||||||
body.height,
|
|
||||||
body.width,
|
|
||||||
body.method.clone(),
|
|
||||||
body.animated,
|
|
||||||
body.allow_remote,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(media::get_content_thumbnail::v3::Response {
|
|
||||||
file,
|
|
||||||
content_type,
|
|
||||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
|
|
||||||
///
|
|
||||||
/// Load media thumbnail from our server or over federation.
|
|
||||||
pub async fn get_content_thumbnail_auth_route(
|
|
||||||
body: Ruma<get_content_thumbnail::v1::Request>,
|
|
||||||
) -> Result<get_content_thumbnail::v1::Response> {
|
|
||||||
get_content_thumbnail(
|
|
||||||
&body.server_name,
|
|
||||||
body.media_id.clone(),
|
|
||||||
body.height,
|
|
||||||
body.width,
|
|
||||||
body.method.clone(),
|
|
||||||
body.animated,
|
|
||||||
true,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Shared implementation for the thumbnail routes.
///
/// Serves a thumbnail from the local store when one exists for the requested
/// dimensions; otherwise, for remote media (when `allow_remote` is set),
/// fetches one over federation — trying the authenticated media endpoint
/// first, then falling back to the legacy unauthenticated endpoint — and
/// caches the result locally before returning it.
async fn get_content_thumbnail(
    server_name: &ServerName,
    media_id: String,
    height: UInt,
    width: UInt,
    method: Option<Method>,
    animated: Option<bool>,
    allow_remote: bool,
) -> Result<get_content_thumbnail::v1::Response, Error> {
    let mxc = format!("mxc://{}/{}", server_name, media_id);

    if let Ok(Some(FileMeta {
        file, content_type, ..
    })) = services()
        .media
        .get_thumbnail(
            mxc.clone(),
            // UInt can hold values larger than u32::MAX, so these narrowing
            // conversions are validated and rejected as bad parameters.
            width
                .try_into()
                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
            height
                .try_into()
                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?,
        )
        .await
    {
        Ok(get_content_thumbnail::v1::Response { file, content_type })
    } else if server_name != services().globals.server_name() && allow_remote {
        let thumbnail_response = match services()
            .sending
            .send_federation_request(
                server_name,
                federation_media::get_content_thumbnail::v1::Request {
                    height,
                    width,
                    method: method.clone(),
                    media_id: media_id.clone(),
                    timeout_ms: Duration::from_secs(20),
                    animated,
                },
            )
            .await
        {
            // Remote replied with the thumbnail bytes inline.
            Ok(federation_media::get_content_thumbnail::v1::Response {
                metadata: _,
                content: FileOrLocation::File(content),
            }) => get_content_thumbnail::v1::Response {
                file: content.file,
                content_type: content.content_type,
            },

            // Remote redirected us to a URL; download the thumbnail there.
            Ok(federation_media::get_content_thumbnail::v1::Response {
                metadata: _,
                content: FileOrLocation::Location(url),
            }) => {
                let get_content::v1::Response {
                    file, content_type, ..
                } = get_location_content(url).await?;

                get_content_thumbnail::v1::Response { file, content_type }
            }
            // `Unrecognized` means the remote lacks the authenticated media
            // endpoint; retry via the deprecated unauthenticated route.
            Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
                let media::get_content_thumbnail::v3::Response {
                    file, content_type, ..
                } = services()
                    .sending
                    .send_federation_request(
                        server_name,
                        media::get_content_thumbnail::v3::Request {
                            height,
                            width,
                            method: method.clone(),
                            server_name: server_name.to_owned(),
                            media_id: media_id.clone(),
                            timeout_ms: Duration::from_secs(20),
                            // NOTE(review): the full-content fallback in
                            // `get_remote_content` uses `allow_redirect: true`
                            // — confirm this `false` is intentional.
                            allow_redirect: false,
                            animated,
                            allow_remote: false,
                        },
                    )
                    .await?;

                get_content_thumbnail::v1::Response { file, content_type }
            }
            Err(e) => return Err(e),
        };

        // Cache the fetched thumbnail locally under the requested dimensions.
        services()
            .media
            .upload_thumbnail(
                mxc,
                thumbnail_response.content_type.as_deref(),
                // NOTE(review): unlike the validated conversions above, these
                // `expect`s would panic for UInt values above u32::MAX; they
                // are only reached after the federation request, so width and
                // height likely passed remote validation — worth confirming.
                width.try_into().expect("all UInts are valid u32s"),
                height.try_into().expect("all UInts are valid u32s"),
                &thumbnail_response.file,
            )
            .await?;

        Ok(thumbnail_response)
    } else {
        Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
    }
}
|
|
||||||
|
|
||||||
async fn get_location_content(url: String) -> Result<get_content::v1::Response, Error> {
|
|
||||||
let client = services().globals.default_client();
|
|
||||||
let response = client.get(url).send().await?;
|
|
||||||
let headers = response.headers();
|
|
||||||
|
|
||||||
let content_type = headers
|
|
||||||
.get(CONTENT_TYPE)
|
|
||||||
.and_then(|header| header.to_str().ok())
|
|
||||||
.map(ToOwned::to_owned);
|
|
||||||
|
|
||||||
let content_disposition = headers
|
|
||||||
.get(CONTENT_DISPOSITION)
|
|
||||||
.map(|header| header.as_bytes())
|
|
||||||
.map(TryFrom::try_from)
|
|
||||||
.and_then(Result::ok);
|
|
||||||
|
|
||||||
let file = response.bytes().await?.to_vec();
|
|
||||||
|
|
||||||
Ok(get_content::v1::Response {
|
|
||||||
file,
|
|
||||||
content_type,
|
|
||||||
content_disposition,
|
|
||||||
})
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,273 +0,0 @@
|
||||||
use crate::{
|
|
||||||
service::{pdu::PduBuilder, rooms::timeline::PduCount},
|
|
||||||
services, utils, Error, Result, Ruma,
|
|
||||||
};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
message::{get_message_events, send_message_event},
|
|
||||||
},
|
|
||||||
events::{StateEventType, TimelineEventType},
|
|
||||||
};
|
|
||||||
use std::{
|
|
||||||
collections::{BTreeMap, HashSet},
|
|
||||||
sync::Arc,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`
///
/// Send a message event into the room.
///
/// - Is a NOOP if the txn id was already used before and returns the same event id again
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
pub async fn send_message_event_route(
    body: Ruma<send_message_event::v3::Request>,
) -> Result<send_message_event::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_deref();

    // Serialize event creation per room: take (or create) the room's state
    // mutex and hold it until the PDU has been appended.
    let mutex_state = Arc::clone(
        services()
            .globals
            .roomid_mutex_state
            .write()
            .await
            .entry(body.room_id.clone())
            .or_default(),
    );
    let state_lock = mutex_state.lock().await;

    // Forbid m.room.encrypted if encryption is disabled
    if TimelineEventType::RoomEncrypted == body.event_type.to_string().into()
        && !services().globals.allow_encryption()
    {
        return Err(Error::BadRequest(
            ErrorKind::forbidden(),
            "Encryption has been disabled",
        ));
    }

    // Check if this is a new transaction id; if not, replay the stored
    // event id so retries are idempotent.
    if let Some(response) =
        services()
            .transaction_ids
            .existing_txnid(sender_user, sender_device, &body.txn_id)?
    {
        // The client might have sent a txnid of the /sendToDevice endpoint
        // This txnid has no response associated with it
        if response.is_empty() {
            return Err(Error::BadRequest(
                ErrorKind::InvalidParam,
                "Tried to use txn id already used for an incompatible endpoint.",
            ));
        }

        // The stored bytes are the UTF-8 event id from the first attempt.
        let event_id = utils::string_from_bytes(&response)
            .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
            .try_into()
            .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
        return Ok(send_message_event::v3::Response { event_id });
    }

    // Echo the transaction id back to the sender in the event's unsigned data.
    let mut unsigned = BTreeMap::new();
    unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());

    let event_id = services()
        .rooms
        .timeline
        .build_and_append_pdu(
            PduBuilder {
                event_type: body.event_type.to_string().into(),
                content: serde_json::from_str(body.body.body.json().get())
                    .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
                unsigned: Some(unsigned),
                state_key: None,
                redacts: None,
                // Only appservices may backdate events with an explicit
                // origin_server_ts.
                timestamp: if body.appservice_info.is_some() {
                    body.timestamp
                } else {
                    None
                },
            },
            sender_user,
            &body.room_id,
            &state_lock,
        )
        .await?;

    // Remember the txn id so a retry of this request returns the same event.
    services().transaction_ids.add_txnid(
        sender_user,
        sender_device,
        &body.txn_id,
        event_id.as_bytes(),
    )?;

    drop(state_lock);

    Ok(send_message_event::v3::Response::new(
        (*event_id).to_owned(),
    ))
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
///
/// Allows paginating through room history.
///
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
/// joined, depending on history_visibility)
pub async fn get_message_events_route(
    body: Ruma<get_message_events::v3::Request>,
) -> Result<get_message_events::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

    // Starting point: the client-supplied token, or the extreme end of the
    // timeline matching the pagination direction.
    let from = match body.from.clone() {
        Some(from) => PduCount::try_from_string(&from)?,
        None => match body.dir {
            ruma::api::Direction::Forward => PduCount::min(),
            ruma::api::Direction::Backward => PduCount::max(),
        },
    };

    // Optional stopping token; invalid tokens are silently ignored.
    let to = body
        .to
        .as_ref()
        .and_then(|t| PduCount::try_from_string(t).ok());

    services()
        .rooms
        .lazy_loading
        .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
        .await?;

    // Cap page size at 100 events.
    let limit = u64::from(body.limit).min(100) as usize;

    let next_token;

    let mut resp = get_message_events::v3::Response::new();

    // Senders whose membership events must accompany the chunk (lazy loading).
    let mut lazy_loaded = HashSet::new();

    match body.dir {
        ruma::api::Direction::Forward => {
            let events_after: Vec<_> = services()
                .rooms
                .timeline
                .pdus_after(sender_user, &body.room_id, from)?
                .take(limit)
                .filter_map(|r| r.ok()) // Filter out buggy events
                .filter(|(_, pdu)| {
                    // Enforce history visibility per event.
                    services()
                        .rooms
                        .state_accessor
                        .user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
                        .unwrap_or(false)
                })
                .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
                .collect();

            for (_, event) in &events_after {
                /* TODO: Remove this when these are resolved:
                 * https://github.com/vector-im/element-android/issues/3417
                 * https://github.com/vector-im/element-web/issues/21034
                if !services().rooms.lazy_loading.lazy_load_was_sent_before(
                    sender_user,
                    sender_device,
                    &body.room_id,
                    &event.sender,
                )? {
                    lazy_loaded.insert(event.sender.clone());
                }
                */
                lazy_loaded.insert(event.sender.clone());
            }

            // The last event's count becomes the next pagination token.
            next_token = events_after.last().map(|(count, _)| count).copied();

            let events_after: Vec<_> = events_after
                .into_iter()
                .map(|(_, pdu)| pdu.to_room_event())
                .collect();

            resp.start = from.stringify();
            resp.end = next_token.map(|count| count.stringify());
            resp.chunk = events_after;
        }
        ruma::api::Direction::Backward => {
            // Pull in older history over federation first if we are missing it.
            services()
                .rooms
                .timeline
                .backfill_if_required(&body.room_id, from)
                .await?;
            let events_before: Vec<_> = services()
                .rooms
                .timeline
                .pdus_until(sender_user, &body.room_id, from)?
                .take(limit)
                .filter_map(|r| r.ok()) // Filter out buggy events
                .filter(|(_, pdu)| {
                    // Enforce history visibility per event.
                    services()
                        .rooms
                        .state_accessor
                        .user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
                        .unwrap_or(false)
                })
                .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
                .collect();

            for (_, event) in &events_before {
                /* TODO: Remove this when these are resolved:
                 * https://github.com/vector-im/element-android/issues/3417
                 * https://github.com/vector-im/element-web/issues/21034
                if !services().rooms.lazy_loading.lazy_load_was_sent_before(
                    sender_user,
                    sender_device,
                    &body.room_id,
                    &event.sender,
                )? {
                    lazy_loaded.insert(event.sender.clone());
                }
                */
                lazy_loaded.insert(event.sender.clone());
            }

            // The last (oldest) event's count becomes the next token.
            next_token = events_before.last().map(|(count, _)| count).copied();

            let events_before: Vec<_> = events_before
                .into_iter()
                .map(|(_, pdu)| pdu.to_room_event())
                .collect();

            resp.start = from.stringify();
            resp.end = next_token.map(|count| count.stringify());
            resp.chunk = events_before;
        }
    }

    // Attach the membership state events for every sender in the chunk.
    resp.state = Vec::new();
    for ll_id in &lazy_loaded {
        if let Some(member_event) = services().rooms.state_accessor.room_state_get(
            &body.room_id,
            &StateEventType::RoomMember,
            ll_id.as_str(),
        )? {
            resp.state.push(member_event.to_state_event());
        }
    }

    // TODO: enable again when we are sure clients can handle it
    /*
    if let Some(next_token) = next_token {
        services().rooms.lazy_loading.lazy_load_mark_sent(
            sender_user,
            sender_device,
            &body.room_id,
            lazy_loaded,
            next_token,
        );
    }
    */

    Ok(resp)
}
|
|
|
@ -1,23 +0,0 @@
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use ruma::{api::client::account, authentication::TokenType};
|
|
||||||
|
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/user/{userId}/openid/request_token`
|
|
||||||
///
|
|
||||||
/// Request an OpenID token to verify identity with third-party services.
|
|
||||||
///
|
|
||||||
/// - The token generated is only valid for the OpenID API.
|
|
||||||
pub async fn create_openid_token_route(
|
|
||||||
body: Ruma<account::request_openid_token::v3::Request>,
|
|
||||||
) -> Result<account::request_openid_token::v3::Response> {
|
|
||||||
let (access_token, expires_in) = services().users.create_openid_token(&body.user_id)?;
|
|
||||||
|
|
||||||
Ok(account::request_openid_token::v3::Response {
|
|
||||||
access_token,
|
|
||||||
token_type: TokenType::Bearer,
|
|
||||||
matrix_server_name: services().globals.server_name().to_owned(),
|
|
||||||
expires_in: Duration::from_secs(expires_in),
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,327 +0,0 @@
|
||||||
use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::{
|
|
||||||
client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
profile::{
|
|
||||||
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
federation::{self, query::get_profile_information::v1::ProfileField},
|
|
||||||
},
|
|
||||||
events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType},
|
|
||||||
};
|
|
||||||
use serde_json::value::to_raw_value;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
///
/// Updates the displayname.
///
/// - Persists the new displayname for the authenticated user.
/// - Sends an updated `m.room.member` state event into every joined room so
///   other users see the change.
/// - Also makes sure other users receive the update using presence EDUs.
pub async fn set_displayname_route(
    body: Ruma<set_display_name::v3::Request>,
) -> Result<set_display_name::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    services()
        .users
        .set_displayname(sender_user, body.displayname.clone())?;

    // Send a new membership event and presence update into all joined rooms.
    // Each entry pairs a ready-to-send PDU with its target room; rooms whose
    // member event cannot be read are silently skipped (filter_map below).
    let all_rooms_joined: Vec<_> = services()
        .rooms
        .state_cache
        .rooms_joined(sender_user)
        .filter_map(|r| r.ok())
        .map(|room_id| {
            Ok::<_, Error>((
                PduBuilder {
                    event_type: TimelineEventType::RoomMember,
                    // Start from the user's current member event in this room
                    // and overwrite only the displayname, so membership,
                    // avatar, etc. are preserved.
                    content: to_raw_value(&RoomMemberEventContent {
                        displayname: body.displayname.clone(),
                        join_authorized_via_users_server: None,
                        ..serde_json::from_str(
                            services()
                                .rooms
                                .state_accessor
                                .room_state_get(
                                    &room_id,
                                    &StateEventType::RoomMember,
                                    sender_user.as_str(),
                                )?
                                .ok_or_else(|| {
                                    Error::bad_database(
                                        "Tried to send displayname update for user not in the \
                                         room.",
                                    )
                                })?
                                .content
                                .get(),
                        )
                        .map_err(|_| Error::bad_database("Database contains invalid PDU."))?
                    })
                    .expect("event is valid, we just created it"),
                    unsigned: None,
                    // Member events are keyed by the user's own ID.
                    state_key: Some(sender_user.to_string()),
                    redacts: None,
                    timestamp: None,
                },
                room_id,
            ))
        })
        .filter_map(|r| r.ok())
        .collect();

    for (pdu_builder, room_id) in all_rooms_joined {
        // Take the per-room state mutex before appending, as required by
        // build_and_append_pdu to keep room state changes serialized.
        let mutex_state = Arc::clone(
            services()
                .globals
                .roomid_mutex_state
                .write()
                .await
                .entry(room_id.clone())
                .or_default(),
        );
        let state_lock = mutex_state.lock().await;

        // Failures here are intentionally ignored: a single bad room should
        // not abort the profile update for the remaining rooms.
        let _ = services()
            .rooms
            .timeline
            .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
            .await;

        // Presence update
        services().rooms.edus.presence.update_presence(
            sender_user,
            &room_id,
            ruma::events::presence::PresenceEvent {
                content: ruma::events::presence::PresenceEventContent {
                    avatar_url: services().users.avatar_url(sender_user)?,
                    currently_active: None,
                    displayname: services().users.displayname(sender_user)?,
                    last_active_ago: Some(
                        utils::millis_since_unix_epoch()
                            .try_into()
                            .expect("time is valid"),
                    ),
                    presence: ruma::presence::PresenceState::Online,
                    status_msg: None,
                },
                sender: sender_user.clone(),
            },
        )?;
    }

    Ok(set_display_name::v3::Response {})
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/profile/{userId}/displayname`
|
|
||||||
///
|
|
||||||
/// Returns the displayname of the user.
|
|
||||||
///
|
|
||||||
/// - If user is on another server: Fetches displayname over federation
|
|
||||||
pub async fn get_displayname_route(
|
|
||||||
body: Ruma<get_display_name::v3::Request>,
|
|
||||||
) -> Result<get_display_name::v3::Response> {
|
|
||||||
if body.user_id.server_name() != services().globals.server_name() {
|
|
||||||
let response = services()
|
|
||||||
.sending
|
|
||||||
.send_federation_request(
|
|
||||||
body.user_id.server_name(),
|
|
||||||
federation::query::get_profile_information::v1::Request {
|
|
||||||
user_id: body.user_id.clone(),
|
|
||||||
field: Some(ProfileField::DisplayName),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
return Ok(get_display_name::v3::Response {
|
|
||||||
displayname: response.displayname,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(get_display_name::v3::Response {
|
|
||||||
displayname: services().users.displayname(&body.user_id)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url`
|
|
||||||
///
|
|
||||||
/// Updates the avatar_url and blurhash.
|
|
||||||
///
|
|
||||||
/// - Also makes sure other users receive the update using presence EDUs
|
|
||||||
pub async fn set_avatar_url_route(
|
|
||||||
body: Ruma<set_avatar_url::v3::Request>,
|
|
||||||
) -> Result<set_avatar_url::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.set_avatar_url(sender_user, body.avatar_url.clone())?;
|
|
||||||
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.set_blurhash(sender_user, body.blurhash.clone())?;
|
|
||||||
|
|
||||||
// Send a new membership event and presence update into all joined rooms
|
|
||||||
let all_joined_rooms: Vec<_> = services()
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_joined(sender_user)
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.map(|room_id| {
|
|
||||||
Ok::<_, Error>((
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomMember,
|
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
|
||||||
avatar_url: body.avatar_url.clone(),
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
..serde_json::from_str(
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(
|
|
||||||
&room_id,
|
|
||||||
&StateEventType::RoomMember,
|
|
||||||
sender_user.as_str(),
|
|
||||||
)?
|
|
||||||
.ok_or_else(|| {
|
|
||||||
Error::bad_database(
|
|
||||||
"Tried to send displayname update for user not in the \
|
|
||||||
room.",
|
|
||||||
)
|
|
||||||
})?
|
|
||||||
.content
|
|
||||||
.get(),
|
|
||||||
)
|
|
||||||
.map_err(|_| Error::bad_database("Database contains invalid PDU."))?
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(sender_user.to_string()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
room_id,
|
|
||||||
))
|
|
||||||
})
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
for (pdu_builder, room_id) in all_joined_rooms {
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
services()
|
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
|
||||||
.write()
|
|
||||||
.await
|
|
||||||
.entry(room_id.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let state_lock = mutex_state.lock().await;
|
|
||||||
|
|
||||||
let _ = services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
// Presence update
|
|
||||||
services().rooms.edus.presence.update_presence(
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
ruma::events::presence::PresenceEvent {
|
|
||||||
content: ruma::events::presence::PresenceEventContent {
|
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
|
||||||
currently_active: None,
|
|
||||||
displayname: services().users.displayname(sender_user)?,
|
|
||||||
last_active_ago: Some(
|
|
||||||
utils::millis_since_unix_epoch()
|
|
||||||
.try_into()
|
|
||||||
.expect("time is valid"),
|
|
||||||
),
|
|
||||||
presence: ruma::presence::PresenceState::Online,
|
|
||||||
status_msg: None,
|
|
||||||
},
|
|
||||||
sender: sender_user.clone(),
|
|
||||||
},
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(set_avatar_url::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/profile/{userId}/avatar_url`
|
|
||||||
///
|
|
||||||
/// Returns the avatar_url and blurhash of the user.
|
|
||||||
///
|
|
||||||
/// - If user is on another server: Fetches avatar_url and blurhash over federation
|
|
||||||
pub async fn get_avatar_url_route(
|
|
||||||
body: Ruma<get_avatar_url::v3::Request>,
|
|
||||||
) -> Result<get_avatar_url::v3::Response> {
|
|
||||||
if body.user_id.server_name() != services().globals.server_name() {
|
|
||||||
let response = services()
|
|
||||||
.sending
|
|
||||||
.send_federation_request(
|
|
||||||
body.user_id.server_name(),
|
|
||||||
federation::query::get_profile_information::v1::Request {
|
|
||||||
user_id: body.user_id.clone(),
|
|
||||||
field: Some(ProfileField::AvatarUrl),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
return Ok(get_avatar_url::v3::Response {
|
|
||||||
avatar_url: response.avatar_url,
|
|
||||||
blurhash: response.blurhash,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(get_avatar_url::v3::Response {
|
|
||||||
avatar_url: services().users.avatar_url(&body.user_id)?,
|
|
||||||
blurhash: services().users.blurhash(&body.user_id)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/profile/{userId}`
|
|
||||||
///
|
|
||||||
/// Returns the displayname, avatar_url and blurhash of the user.
|
|
||||||
///
|
|
||||||
/// - If user is on another server: Fetches profile over federation
|
|
||||||
pub async fn get_profile_route(
|
|
||||||
body: Ruma<get_profile::v3::Request>,
|
|
||||||
) -> Result<get_profile::v3::Response> {
|
|
||||||
if body.user_id.server_name() != services().globals.server_name() {
|
|
||||||
let response = services()
|
|
||||||
.sending
|
|
||||||
.send_federation_request(
|
|
||||||
body.user_id.server_name(),
|
|
||||||
federation::query::get_profile_information::v1::Request {
|
|
||||||
user_id: body.user_id.clone(),
|
|
||||||
field: None,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
return Ok(get_profile::v3::Response {
|
|
||||||
displayname: response.displayname,
|
|
||||||
avatar_url: response.avatar_url,
|
|
||||||
blurhash: response.blurhash,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
if !services().users.exists(&body.user_id)? {
|
|
||||||
// Return 404 if this user doesn't exist
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Profile was not found.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(get_profile::v3::Response {
|
|
||||||
avatar_url: services().users.avatar_url(&body.user_id)?,
|
|
||||||
blurhash: services().users.blurhash(&body.user_id)?,
|
|
||||||
displayname: services().users.displayname(&body.user_id)?,
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,432 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
push::{
|
|
||||||
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled,
|
|
||||||
get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions,
|
|
||||||
set_pushrule_enabled, RuleScope,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
events::{push_rules::PushRulesEvent, GlobalAccountDataEventType},
|
|
||||||
push::{InsertPushRuleError, RemovePushRuleError},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules`
|
|
||||||
///
|
|
||||||
/// Retrieves the push rules event for this user.
|
|
||||||
pub async fn get_pushrules_all_route(
|
|
||||||
body: Ruma<get_pushrules_all::v3::Request>,
|
|
||||||
) -> Result<get_pushrules_all::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
|
||||||
.content;
|
|
||||||
|
|
||||||
Ok(get_pushrules_all::v3::Response {
|
|
||||||
global: account_data.global,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
|
||||||
///
|
|
||||||
/// Retrieves a single specified push rule for this user.
|
|
||||||
pub async fn get_pushrule_route(
|
|
||||||
body: Ruma<get_pushrule::v3::Request>,
|
|
||||||
) -> Result<get_pushrule::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
|
||||||
.content;
|
|
||||||
|
|
||||||
let rule = account_data
|
|
||||||
.global
|
|
||||||
.get(body.kind.clone(), &body.rule_id)
|
|
||||||
.map(Into::into);
|
|
||||||
|
|
||||||
if let Some(rule) = rule {
|
|
||||||
Ok(get_pushrule::v3::Response { rule })
|
|
||||||
} else {
|
|
||||||
Err(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Creates a single specified push rule for this user.
///
/// Only the `global` scope is supported. The rule is inserted into the
/// user's `m.push_rules` account data event, honouring the optional
/// `before`/`after` positioning hints, and the whole event is written back.
pub async fn set_pushrule_route(
    body: Ruma<set_pushrule::v3::Request>,
) -> Result<set_pushrule::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let body = body.body;

    if body.scope != RuleScope::Global {
        return Err(Error::BadRequest(
            ErrorKind::InvalidParam,
            "Scopes other than 'global' are not supported.",
        ));
    }

    // Current m.push_rules event from the user's global account data.
    let event = services()
        .account_data
        .get(
            None,
            sender_user,
            GlobalAccountDataEventType::PushRules.to_string().into(),
        )?
        .ok_or(Error::BadRequest(
            ErrorKind::NotFound,
            "PushRules event not found.",
        ))?;

    let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
        .map_err(|_| Error::bad_database("Invalid account data event in db."))?;

    // Insert the new rule; ruma validates id format and relative placement.
    if let Err(error) = account_data.content.global.insert(
        body.rule.clone(),
        body.after.as_deref(),
        body.before.as_deref(),
    ) {
        // Translate ruma's insertion errors into client-facing Matrix errors.
        let err = match error {
            InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest(
                ErrorKind::InvalidParam,
                "Rule IDs starting with a dot are reserved for server-default rules.",
            ),
            InsertPushRuleError::InvalidRuleId => Error::BadRequest(
                ErrorKind::InvalidParam,
                "Rule ID containing invalid characters.",
            ),
            InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest(
                ErrorKind::InvalidParam,
                "Can't place a push rule relatively to a server-default rule.",
            ),
            InsertPushRuleError::UnknownRuleId => Error::BadRequest(
                ErrorKind::NotFound,
                "The before or after rule could not be found.",
            ),
            InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest(
                ErrorKind::InvalidParam,
                "The before rule has a higher priority than the after rule.",
            ),
            // InsertPushRuleError is non-exhaustive; treat unknown variants
            // as generic invalid input.
            _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
        };

        return Err(err);
    }

    // Persist the modified rule set back into global account data.
    services().account_data.update(
        None,
        sender_user,
        GlobalAccountDataEventType::PushRules.to_string().into(),
        &serde_json::to_value(account_data).expect("to json value always works"),
    )?;

    Ok(set_pushrule::v3::Response {})
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
|
||||||
///
|
|
||||||
/// Gets the actions of a single specified push rule for this user.
|
|
||||||
pub async fn get_pushrule_actions_route(
|
|
||||||
body: Ruma<get_pushrule_actions::v3::Request>,
|
|
||||||
) -> Result<get_pushrule_actions::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != RuleScope::Global {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
|
||||||
.content;
|
|
||||||
|
|
||||||
let global = account_data.global;
|
|
||||||
let actions = global
|
|
||||||
.get(body.kind.clone(), &body.rule_id)
|
|
||||||
.map(|rule| rule.actions().to_owned())
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_pushrule_actions::v3::Response { actions })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
|
||||||
///
|
|
||||||
/// Sets the actions of a single specified push rule for this user.
|
|
||||||
pub async fn set_pushrule_actions_route(
|
|
||||||
body: Ruma<set_pushrule_actions::v3::Request>,
|
|
||||||
) -> Result<set_pushrule_actions::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != RuleScope::Global {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
|
||||||
|
|
||||||
if account_data
|
|
||||||
.content
|
|
||||||
.global
|
|
||||||
.set_actions(body.kind.clone(), &body.rule_id, body.actions.clone())
|
|
||||||
.is_err()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
services().account_data.update(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
&serde_json::to_value(account_data).expect("to json value always works"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(set_pushrule_actions::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
|
||||||
///
|
|
||||||
/// Gets the enabled status of a single specified push rule for this user.
|
|
||||||
pub async fn get_pushrule_enabled_route(
|
|
||||||
body: Ruma<get_pushrule_enabled::v3::Request>,
|
|
||||||
) -> Result<get_pushrule_enabled::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != RuleScope::Global {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
|
||||||
|
|
||||||
let global = account_data.content.global;
|
|
||||||
let enabled = global
|
|
||||||
.get(body.kind.clone(), &body.rule_id)
|
|
||||||
.map(|r| r.enabled())
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_pushrule_enabled::v3::Response { enabled })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
|
||||||
///
|
|
||||||
/// Sets the enabled status of a single specified push rule for this user.
|
|
||||||
pub async fn set_pushrule_enabled_route(
|
|
||||||
body: Ruma<set_pushrule_enabled::v3::Request>,
|
|
||||||
) -> Result<set_pushrule_enabled::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != RuleScope::Global {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
|
||||||
|
|
||||||
if account_data
|
|
||||||
.content
|
|
||||||
.global
|
|
||||||
.set_enabled(body.kind.clone(), &body.rule_id, body.enabled)
|
|
||||||
.is_err()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
services().account_data.update(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
&serde_json::to_value(account_data).expect("to json value always works"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(set_pushrule_enabled::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Deletes a single specified push rule for this user.
///
/// Only the `global` scope is supported; server-default rules cannot be
/// deleted. The whole `m.push_rules` account data event is rewritten after
/// the rule is removed.
pub async fn delete_pushrule_route(
    body: Ruma<delete_pushrule::v3::Request>,
) -> Result<delete_pushrule::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    if body.scope != RuleScope::Global {
        return Err(Error::BadRequest(
            ErrorKind::InvalidParam,
            "Scopes other than 'global' are not supported.",
        ));
    }

    // Current m.push_rules event from the user's global account data.
    let event = services()
        .account_data
        .get(
            None,
            sender_user,
            GlobalAccountDataEventType::PushRules.to_string().into(),
        )?
        .ok_or(Error::BadRequest(
            ErrorKind::NotFound,
            "PushRules event not found.",
        ))?;

    let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
        .map_err(|_| Error::bad_database("Invalid account data event in db."))?;

    // Remove the rule; translate ruma's removal errors into client-facing
    // Matrix errors.
    if let Err(error) = account_data
        .content
        .global
        .remove(body.kind.clone(), &body.rule_id)
    {
        let err = match error {
            RemovePushRuleError::ServerDefault => Error::BadRequest(
                ErrorKind::InvalidParam,
                "Cannot delete a server-default pushrule.",
            ),
            RemovePushRuleError::NotFound => {
                Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")
            }
            // RemovePushRuleError is non-exhaustive; treat unknown variants
            // as generic invalid input.
            _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
        };

        return Err(err);
    }

    // Persist the modified rule set back into global account data.
    services().account_data.update(
        None,
        sender_user,
        GlobalAccountDataEventType::PushRules.to_string().into(),
        &serde_json::to_value(account_data).expect("to json value always works"),
    )?;

    Ok(delete_pushrule::v3::Response {})
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushers`
|
|
||||||
///
|
|
||||||
/// Gets all currently active pushers for the sender user.
|
|
||||||
pub async fn get_pushers_route(
|
|
||||||
body: Ruma<get_pushers::v3::Request>,
|
|
||||||
) -> Result<get_pushers::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
Ok(get_pushers::v3::Response {
|
|
||||||
pushers: services().pusher.get_pushers(sender_user)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/pushers/set`
|
|
||||||
///
|
|
||||||
/// Adds a pusher for the sender user.
|
|
||||||
///
|
|
||||||
/// - TODO: Handle `append`
|
|
||||||
pub async fn set_pushers_route(
|
|
||||||
body: Ruma<set_pusher::v3::Request>,
|
|
||||||
) -> Result<set_pusher::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.pusher
|
|
||||||
.set_pusher(sender_user, body.action.clone())?;
|
|
||||||
|
|
||||||
Ok(set_pusher::v3::Response::default())
|
|
||||||
}
|
|
|
@ -1,182 +0,0 @@
|
||||||
use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
|
|
||||||
events::{
|
|
||||||
receipt::{ReceiptThread, ReceiptType},
|
|
||||||
RoomAccountDataEventType,
|
|
||||||
},
|
|
||||||
MilliSecondsSinceUnixEpoch,
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
///
/// Sets different types of read markers.
///
/// - Updates fully-read account data event to `fully_read`
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
pub async fn set_read_marker_route(
    body: Ruma<set_read_marker::v3::Request>,
) -> Result<set_read_marker::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // 1. Optional m.fully_read marker, stored as per-room account data.
    if let Some(fully_read) = &body.fully_read {
        let fully_read_event = ruma::events::fully_read::FullyReadEvent {
            content: ruma::events::fully_read::FullyReadEventContent {
                event_id: fully_read.clone(),
            },
        };
        services().account_data.update(
            Some(&body.room_id),
            sender_user,
            RoomAccountDataEventType::FullyRead,
            &serde_json::to_value(fully_read_event).expect("to json value always works"),
        )?;
    }

    // 2. Any kind of read receipt clears the user's notification counters
    //    for this room before the receipt itself is recorded.
    if body.private_read_receipt.is_some() || body.read_receipt.is_some() {
        services()
            .rooms
            .user
            .reset_notification_counts(sender_user, &body.room_id)?;
    }

    // 3. Private read receipt: stored locally only, keyed by the event's
    //    timeline position (PDU count) rather than the event ID.
    if let Some(event) = &body.private_read_receipt {
        let count = services()
            .rooms
            .timeline
            .get_pdu_count(event)?
            .ok_or(Error::BadRequest(
                ErrorKind::InvalidParam,
                "Event does not exist.",
            ))?;
        // Backfilled (pre-join history) events have no usable position for
        // a read marker.
        let count = match count {
            PduCount::Backfilled(_) => {
                return Err(Error::BadRequest(
                    ErrorKind::InvalidParam,
                    "Read receipt is in backfilled timeline",
                ))
            }
            PduCount::Normal(c) => c,
        };
        services()
            .rooms
            .edus
            .read_receipt
            .private_read_set(&body.room_id, sender_user, count)?;
    }

    // 4. Public read receipt: broadcast as an m.receipt EDU with the
    //    structure event_id -> receipt type -> user -> receipt.
    if let Some(event) = &body.read_receipt {
        let mut user_receipts = BTreeMap::new();
        user_receipts.insert(
            sender_user.clone(),
            ruma::events::receipt::Receipt {
                ts: Some(MilliSecondsSinceUnixEpoch::now()),
                thread: ReceiptThread::Unthreaded,
            },
        );

        let mut receipts = BTreeMap::new();
        receipts.insert(ReceiptType::Read, user_receipts);

        let mut receipt_content = BTreeMap::new();
        receipt_content.insert(event.to_owned(), receipts);

        services().rooms.edus.read_receipt.readreceipt_update(
            sender_user,
            &body.room_id,
            ruma::events::receipt::ReceiptEvent {
                content: ruma::events::receipt::ReceiptEventContent(receipt_content),
                room_id: body.room_id.clone(),
            },
        )?;
    }

    Ok(set_read_marker::v3::Response {})
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
///
/// Sets private read marker and public read receipt EDU.
///
/// Handles `m.fully_read`, `m.read` and `m.read.private` receipt types;
/// the two read variants also reset the user's notification counters.
pub async fn create_receipt_route(
    body: Ruma<create_receipt::v3::Request>,
) -> Result<create_receipt::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // Read-type receipts clear the notification counters for this room
    // before the receipt itself is recorded.
    if matches!(
        &body.receipt_type,
        create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate
    ) {
        services()
            .rooms
            .user
            .reset_notification_counts(sender_user, &body.room_id)?;
    }

    match body.receipt_type {
        // m.fully_read: stored as per-room account data, not broadcast.
        create_receipt::v3::ReceiptType::FullyRead => {
            let fully_read_event = ruma::events::fully_read::FullyReadEvent {
                content: ruma::events::fully_read::FullyReadEventContent {
                    event_id: body.event_id.clone(),
                },
            };
            services().account_data.update(
                Some(&body.room_id),
                sender_user,
                RoomAccountDataEventType::FullyRead,
                &serde_json::to_value(fully_read_event).expect("to json value always works"),
            )?;
        }
        // m.read: broadcast as a public m.receipt EDU with the structure
        // event_id -> receipt type -> user -> receipt.
        create_receipt::v3::ReceiptType::Read => {
            let mut user_receipts = BTreeMap::new();
            user_receipts.insert(
                sender_user.clone(),
                ruma::events::receipt::Receipt {
                    ts: Some(MilliSecondsSinceUnixEpoch::now()),
                    thread: ReceiptThread::Unthreaded,
                },
            );
            let mut receipts = BTreeMap::new();
            receipts.insert(ReceiptType::Read, user_receipts);

            let mut receipt_content = BTreeMap::new();
            receipt_content.insert(body.event_id.to_owned(), receipts);

            services().rooms.edus.read_receipt.readreceipt_update(
                sender_user,
                &body.room_id,
                ruma::events::receipt::ReceiptEvent {
                    content: ruma::events::receipt::ReceiptEventContent(receipt_content),
                    room_id: body.room_id.clone(),
                },
            )?;
        }
        // m.read.private: stored locally only, keyed by the event's timeline
        // position (PDU count) rather than the event ID.
        create_receipt::v3::ReceiptType::ReadPrivate => {
            let count = services()
                .rooms
                .timeline
                .get_pdu_count(&body.event_id)?
                .ok_or(Error::BadRequest(
                    ErrorKind::InvalidParam,
                    "Event does not exist.",
                ))?;
            // Backfilled (pre-join history) events have no usable position
            // for a read marker.
            let count = match count {
                PduCount::Backfilled(_) => {
                    return Err(Error::BadRequest(
                        ErrorKind::InvalidParam,
                        "Read receipt is in backfilled timeline",
                    ))
                }
                PduCount::Normal(c) => c,
            };
            services().rooms.edus.read_receipt.private_read_set(
                &body.room_id,
                sender_user,
                count,
            )?;
        }
        // ReceiptType is non-exhaustive; reject anything we don't know.
        _ => return Err(Error::bad_database("Unsupported receipt type")),
    }

    Ok(create_receipt::v3::Response {})
}
|
|
|
@ -1,59 +0,0 @@
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use crate::{service::pdu::PduBuilder, services, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::redact::redact_event,
|
|
||||||
events::{room::redaction::RoomRedactionEventContent, TimelineEventType},
|
|
||||||
};
|
|
||||||
|
|
||||||
use serde_json::value::to_raw_value;
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}`
|
|
||||||
///
|
|
||||||
/// Tries to send a redaction event into the room.
|
|
||||||
///
|
|
||||||
/// - TODO: Handle txn id
|
|
||||||
pub async fn redact_event_route(
|
|
||||||
body: Ruma<redact_event::v3::Request>,
|
|
||||||
) -> Result<redact_event::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let body = body.body;
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
services()
|
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
|
||||||
.write()
|
|
||||||
.await
|
|
||||||
.entry(body.room_id.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let state_lock = mutex_state.lock().await;
|
|
||||||
|
|
||||||
let event_id = services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomRedaction,
|
|
||||||
content: to_raw_value(&RoomRedactionEventContent {
|
|
||||||
redacts: Some(body.event_id.clone()),
|
|
||||||
reason: body.reason.clone(),
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: None,
|
|
||||||
redacts: Some(body.event_id.into()),
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
drop(state_lock);
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
|
||||||
Ok(redact_event::v3::Response { event_id })
|
|
||||||
}
|
|
|
@ -1,91 +0,0 @@
|
||||||
use ruma::api::client::relations::{
|
|
||||||
get_relating_events, get_relating_events_with_rel_type,
|
|
||||||
get_relating_events_with_rel_type_and_event_type,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
|
|
||||||
pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
|
||||||
body: Ruma<get_relating_events_with_rel_type_and_event_type::v1::Request>,
|
|
||||||
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let res = services()
|
|
||||||
.rooms
|
|
||||||
.pdu_metadata
|
|
||||||
.paginate_relations_with_filter(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_id,
|
|
||||||
Some(body.event_type.clone()),
|
|
||||||
Some(body.rel_type.clone()),
|
|
||||||
body.from.clone(),
|
|
||||||
body.to.clone(),
|
|
||||||
body.limit,
|
|
||||||
body.recurse,
|
|
||||||
&body.dir,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(
|
|
||||||
get_relating_events_with_rel_type_and_event_type::v1::Response {
|
|
||||||
chunk: res.chunk,
|
|
||||||
next_batch: res.next_batch,
|
|
||||||
prev_batch: res.prev_batch,
|
|
||||||
recursion_depth: res.recursion_depth,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}`
|
|
||||||
pub async fn get_relating_events_with_rel_type_route(
|
|
||||||
body: Ruma<get_relating_events_with_rel_type::v1::Request>,
|
|
||||||
) -> Result<get_relating_events_with_rel_type::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let res = services()
|
|
||||||
.rooms
|
|
||||||
.pdu_metadata
|
|
||||||
.paginate_relations_with_filter(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_id,
|
|
||||||
None,
|
|
||||||
Some(body.rel_type.clone()),
|
|
||||||
body.from.clone(),
|
|
||||||
body.to.clone(),
|
|
||||||
body.limit,
|
|
||||||
body.recurse,
|
|
||||||
&body.dir,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(get_relating_events_with_rel_type::v1::Response {
|
|
||||||
chunk: res.chunk,
|
|
||||||
next_batch: res.next_batch,
|
|
||||||
prev_batch: res.prev_batch,
|
|
||||||
recursion_depth: res.recursion_depth,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}`
|
|
||||||
pub async fn get_relating_events_route(
|
|
||||||
body: Ruma<get_relating_events::v1::Request>,
|
|
||||||
) -> Result<get_relating_events::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.pdu_metadata
|
|
||||||
.paginate_relations_with_filter(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_id,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
body.from.clone(),
|
|
||||||
body.to.clone(),
|
|
||||||
body.limit,
|
|
||||||
body.recurse,
|
|
||||||
&body.dir,
|
|
||||||
)
|
|
||||||
}
|
|
|
@ -1,69 +0,0 @@
|
||||||
use crate::{services, utils::HtmlEscape, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{error::ErrorKind, room::report_content},
|
|
||||||
events::room::message,
|
|
||||||
int,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}`
|
|
||||||
///
|
|
||||||
/// Reports an inappropriate event to homeserver admins
|
|
||||||
///
|
|
||||||
pub async fn report_event_route(
|
|
||||||
body: Ruma<report_content::v3::Request>,
|
|
||||||
) -> Result<report_content::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? {
|
|
||||||
Some(pdu) => pdu,
|
|
||||||
_ => {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Invalid Event ID",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Invalid score, must be within 0 to -100",
|
|
||||||
));
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Reason too long, should be 250 characters or fewer",
|
|
||||||
));
|
|
||||||
};
|
|
||||||
|
|
||||||
services().admin
|
|
||||||
.send_message(message::RoomMessageEventContent::text_html(
|
|
||||||
format!(
|
|
||||||
"Report received from: {}\n\n\
|
|
||||||
Event ID: {:?}\n\
|
|
||||||
Room ID: {:?}\n\
|
|
||||||
Sent By: {:?}\n\n\
|
|
||||||
Report Score: {:?}\n\
|
|
||||||
Report Reason: {:?}",
|
|
||||||
sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
|
|
||||||
),
|
|
||||||
format!(
|
|
||||||
"<details><summary>Report received from: <a href=\"https://matrix.to/#/{0:?}\">{0:?}\
|
|
||||||
</a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1:?}</code>\
|
|
||||||
<a href=\"https://matrix.to/#/{2:?}/{1:?}\">🔗</a></li><li>Room ID: <code>{2:?}</code>\
|
|
||||||
</li><li>Sent By: <a href=\"https://matrix.to/#/{3:?}\">{3:?}</a></li></ul></li><li>\
|
|
||||||
Report Info<ul><li>Report Score: {4:?}</li><li>Report Reason: {5}</li></ul></li>\
|
|
||||||
</ul></details>",
|
|
||||||
sender_user,
|
|
||||||
pdu.event_id,
|
|
||||||
pdu.room_id,
|
|
||||||
pdu.sender,
|
|
||||||
body.score,
|
|
||||||
HtmlEscape(body.reason.as_deref().unwrap_or(""))
|
|
||||||
),
|
|
||||||
));
|
|
||||||
|
|
||||||
Ok(report_content::v3::Response {})
|
|
||||||
}
|
|
|
@ -1,878 +0,0 @@
|
||||||
use crate::{
|
|
||||||
api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma,
|
|
||||||
};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
room::{self, aliases, create_room, get_room_event, upgrade_room},
|
|
||||||
},
|
|
||||||
events::{
|
|
||||||
room::{
|
|
||||||
canonical_alias::RoomCanonicalAliasEventContent,
|
|
||||||
create::RoomCreateEventContent,
|
|
||||||
guest_access::{GuestAccess, RoomGuestAccessEventContent},
|
|
||||||
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
|
||||||
join_rules::{JoinRule, RoomJoinRulesEventContent},
|
|
||||||
member::{MembershipState, RoomMemberEventContent},
|
|
||||||
name::RoomNameEventContent,
|
|
||||||
power_levels::RoomPowerLevelsEventContent,
|
|
||||||
tombstone::RoomTombstoneEventContent,
|
|
||||||
topic::RoomTopicEventContent,
|
|
||||||
},
|
|
||||||
StateEventType, TimelineEventType,
|
|
||||||
},
|
|
||||||
int,
|
|
||||||
serde::JsonObject,
|
|
||||||
CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId,
|
|
||||||
};
|
|
||||||
use serde_json::{json, value::to_raw_value};
|
|
||||||
use std::{cmp::max, collections::BTreeMap, sync::Arc};
|
|
||||||
use tracing::{info, warn};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/createRoom`
|
|
||||||
///
|
|
||||||
/// Creates a new room.
|
|
||||||
///
|
|
||||||
/// - Room ID is randomly generated
|
|
||||||
/// - Create alias if room_alias_name is set
|
|
||||||
/// - Send create event
|
|
||||||
/// - Join sender user
|
|
||||||
/// - Send power levels event
|
|
||||||
/// - Send canonical room alias
|
|
||||||
/// - Send join rules
|
|
||||||
/// - Send history visibility
|
|
||||||
/// - Send guest access
|
|
||||||
/// - Send events listed in initial state
|
|
||||||
/// - Send events implied by `name` and `topic`
|
|
||||||
/// - Send invite events
|
|
||||||
pub async fn create_room_route(
|
|
||||||
body: Ruma<create_room::v3::Request>,
|
|
||||||
) -> Result<create_room::v3::Response> {
|
|
||||||
use create_room::v3::RoomPreset;
|
|
||||||
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let room_id = RoomId::new(services().globals.server_name());
|
|
||||||
|
|
||||||
services().rooms.short.get_or_create_shortroomid(&room_id)?;
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
services()
|
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
|
||||||
.write()
|
|
||||||
.await
|
|
||||||
.entry(room_id.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let state_lock = mutex_state.lock().await;
|
|
||||||
|
|
||||||
if !services().globals.allow_room_creation()
|
|
||||||
&& body.appservice_info.is_none()
|
|
||||||
&& !services().users.is_admin(sender_user)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"Room creation has been disabled.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let alias: Option<OwnedRoomAliasId> =
|
|
||||||
body.room_alias_name
|
|
||||||
.as_ref()
|
|
||||||
.map_or(Ok(None), |localpart| {
|
|
||||||
// TODO: Check for invalid characters and maximum length
|
|
||||||
let alias = RoomAliasId::parse(format!(
|
|
||||||
"#{}:{}",
|
|
||||||
localpart,
|
|
||||||
services().globals.server_name()
|
|
||||||
))
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
|
|
||||||
|
|
||||||
if services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_local_alias(&alias)?
|
|
||||||
.is_some()
|
|
||||||
{
|
|
||||||
Err(Error::BadRequest(
|
|
||||||
ErrorKind::RoomInUse,
|
|
||||||
"Room alias already exists.",
|
|
||||||
))
|
|
||||||
} else {
|
|
||||||
Ok(Some(alias))
|
|
||||||
}
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if let Some(ref alias) = alias {
|
|
||||||
if let Some(ref info) = body.appservice_info {
|
|
||||||
if !info.aliases.is_match(alias.as_str()) {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"Room alias is not in namespace.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
} else if services().appservice.is_exclusive_alias(alias).await {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"Room alias reserved by appservice.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let room_version = match body.room_version.clone() {
|
|
||||||
Some(room_version) => {
|
|
||||||
if services()
|
|
||||||
.globals
|
|
||||||
.supported_room_versions()
|
|
||||||
.contains(&room_version)
|
|
||||||
{
|
|
||||||
room_version
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::UnsupportedRoomVersion,
|
|
||||||
"This server does not support that room version.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => services().globals.default_room_version(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let content = match &body.creation_content {
|
|
||||||
Some(content) => {
|
|
||||||
let mut content = content
|
|
||||||
.deserialize_as::<CanonicalJsonObject>()
|
|
||||||
.expect("Invalid creation content");
|
|
||||||
|
|
||||||
match room_version {
|
|
||||||
RoomVersionId::V1
|
|
||||||
| RoomVersionId::V2
|
|
||||||
| RoomVersionId::V3
|
|
||||||
| RoomVersionId::V4
|
|
||||||
| RoomVersionId::V5
|
|
||||||
| RoomVersionId::V6
|
|
||||||
| RoomVersionId::V7
|
|
||||||
| RoomVersionId::V8
|
|
||||||
| RoomVersionId::V9
|
|
||||||
| RoomVersionId::V10 => {
|
|
||||||
content.insert(
|
|
||||||
"creator".into(),
|
|
||||||
json!(&sender_user).try_into().map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
|
|
||||||
})?,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
RoomVersionId::V11 => {} // V11 removed the "creator" key
|
|
||||||
_ => unreachable!("Validity of room version already checked"),
|
|
||||||
}
|
|
||||||
|
|
||||||
content.insert(
|
|
||||||
"room_version".into(),
|
|
||||||
json!(room_version.as_str()).try_into().map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
|
|
||||||
})?,
|
|
||||||
);
|
|
||||||
content
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
let content = match room_version {
|
|
||||||
RoomVersionId::V1
|
|
||||||
| RoomVersionId::V2
|
|
||||||
| RoomVersionId::V3
|
|
||||||
| RoomVersionId::V4
|
|
||||||
| RoomVersionId::V5
|
|
||||||
| RoomVersionId::V6
|
|
||||||
| RoomVersionId::V7
|
|
||||||
| RoomVersionId::V8
|
|
||||||
| RoomVersionId::V9
|
|
||||||
| RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()),
|
|
||||||
RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
|
|
||||||
_ => unreachable!("Validity of room version already checked"),
|
|
||||||
};
|
|
||||||
let mut content = serde_json::from_str::<CanonicalJsonObject>(
|
|
||||||
to_raw_value(&content)
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
|
|
||||||
.get(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
content.insert(
|
|
||||||
"room_version".into(),
|
|
||||||
json!(room_version.as_str()).try_into().map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
|
|
||||||
})?,
|
|
||||||
);
|
|
||||||
content
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Validate creation content
|
|
||||||
let de_result = serde_json::from_str::<CanonicalJsonObject>(
|
|
||||||
to_raw_value(&content)
|
|
||||||
.expect("Invalid creation content")
|
|
||||||
.get(),
|
|
||||||
);
|
|
||||||
|
|
||||||
if de_result.is_err() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::BadJson,
|
|
||||||
"Invalid creation content",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// 1. The room create event
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomCreate,
|
|
||||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// 2. Let the room creator join
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomMember,
|
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Join,
|
|
||||||
displayname: services().users.displayname(sender_user)?,
|
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
|
||||||
is_direct: Some(body.is_direct),
|
|
||||||
third_party_invite: None,
|
|
||||||
blurhash: services().users.blurhash(sender_user)?,
|
|
||||||
reason: None,
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(sender_user.to_string()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// 3. Power levels
|
|
||||||
|
|
||||||
// Figure out preset. We need it for preset specific events
|
|
||||||
let preset = body.preset.clone().unwrap_or(match &body.visibility {
|
|
||||||
room::Visibility::Private => RoomPreset::PrivateChat,
|
|
||||||
room::Visibility::Public => RoomPreset::PublicChat,
|
|
||||||
_ => RoomPreset::PrivateChat, // Room visibility should not be custom
|
|
||||||
});
|
|
||||||
|
|
||||||
let mut users = BTreeMap::new();
|
|
||||||
users.insert(sender_user.clone(), int!(100));
|
|
||||||
|
|
||||||
if preset == RoomPreset::TrustedPrivateChat {
|
|
||||||
for invite_ in &body.invite {
|
|
||||||
users.insert(invite_.clone(), int!(100));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut power_levels_content = serde_json::to_value(RoomPowerLevelsEventContent {
|
|
||||||
users,
|
|
||||||
..Default::default()
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it");
|
|
||||||
|
|
||||||
if let Some(power_level_content_override) = &body.power_level_content_override {
|
|
||||||
let json: JsonObject = serde_json::from_str(power_level_content_override.json().get())
|
|
||||||
.map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
for (key, value) in json {
|
|
||||||
power_levels_content[key] = value;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomPowerLevels,
|
|
||||||
content: to_raw_value(&power_levels_content)
|
|
||||||
.expect("to_raw_value always works on serde_json::Value"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// 4. Canonical room alias
|
|
||||||
if let Some(room_alias_id) = &alias {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomCanonicalAlias,
|
|
||||||
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
|
||||||
alias: Some(room_alias_id.to_owned()),
|
|
||||||
alt_aliases: vec![],
|
|
||||||
})
|
|
||||||
.expect("We checked that alias earlier, it must be fine"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 5. Events set by preset
|
|
||||||
|
|
||||||
// 5.1 Join Rules
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomJoinRules,
|
|
||||||
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
|
||||||
RoomPreset::PublicChat => JoinRule::Public,
|
|
||||||
// according to spec "invite" is the default
|
|
||||||
_ => JoinRule::Invite,
|
|
||||||
}))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// 5.2 History Visibility
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomHistoryVisibility,
|
|
||||||
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
|
||||||
HistoryVisibility::Shared,
|
|
||||||
))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// 5.3 Guest Access
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomGuestAccess,
|
|
||||||
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
|
||||||
RoomPreset::PublicChat => GuestAccess::Forbidden,
|
|
||||||
_ => GuestAccess::CanJoin,
|
|
||||||
}))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// 6. Events listed in initial_state
|
|
||||||
for event in &body.initial_state {
|
|
||||||
let mut pdu_builder = event.deserialize_as::<PduBuilder>().map_err(|e| {
|
|
||||||
warn!("Invalid initial state event: {:?}", e);
|
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
// Implicit state key defaults to ""
|
|
||||||
pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
|
|
||||||
|
|
||||||
// Silently skip encryption events if they are not allowed
|
|
||||||
if pdu_builder.event_type == TimelineEventType::RoomEncryption
|
|
||||||
&& !services().globals.allow_encryption()
|
|
||||||
{
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 7. Events implied by name and topic
|
|
||||||
if let Some(name) = &body.name {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomName,
|
|
||||||
content: to_raw_value(&RoomNameEventContent::new(name.clone()))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(topic) = &body.topic {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomTopic,
|
|
||||||
content: to_raw_value(&RoomTopicEventContent {
|
|
||||||
topic: topic.clone(),
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 8. Events implied by invite (and TODO: invite_3pid)
|
|
||||||
drop(state_lock);
|
|
||||||
for user_id in &body.invite {
|
|
||||||
let _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Homeserver specific stuff
|
|
||||||
if let Some(alias) = alias {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.set_alias(&alias, &room_id, sender_user)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
if body.visibility == room::Visibility::Public {
|
|
||||||
services().rooms.directory.set_public(&room_id)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("{} created a room", sender_user);
|
|
||||||
|
|
||||||
Ok(create_room::v3::Response::new(room_id))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}`
|
|
||||||
///
|
|
||||||
/// Gets a single event.
|
|
||||||
///
|
|
||||||
/// - You have to currently be joined to the room (TODO: Respect history visibility)
|
|
||||||
pub async fn get_room_event_route(
|
|
||||||
body: Ruma<get_room_event::v3::Request>,
|
|
||||||
) -> Result<get_room_event::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.get_pdu(&body.event_id)?
|
|
||||||
.ok_or_else(|| {
|
|
||||||
warn!("Event not found, event ID: {:?}", &body.event_id);
|
|
||||||
Error::BadRequest(ErrorKind::NotFound, "Event not found.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if !services().rooms.state_accessor.user_can_see_event(
|
|
||||||
sender_user,
|
|
||||||
&event.room_id,
|
|
||||||
&body.event_id,
|
|
||||||
)? {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"You don't have permission to view this event.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut event = (*event).clone();
|
|
||||||
event.add_age()?;
|
|
||||||
|
|
||||||
Ok(get_room_event::v3::Response {
|
|
||||||
event: event.to_room_event(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases`
|
|
||||||
///
|
|
||||||
/// Lists all aliases of the room.
|
|
||||||
///
|
|
||||||
/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable
|
|
||||||
pub async fn get_room_aliases_route(
|
|
||||||
body: Ruma<aliases::v3::Request>,
|
|
||||||
) -> Result<aliases::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_joined(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"You don't have permission to view this room.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(aliases::v3::Response {
|
|
||||||
aliases: services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.local_aliases_for_room(&body.room_id)
|
|
||||||
.filter_map(|a| a.ok())
|
|
||||||
.collect(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade`
|
|
||||||
///
|
|
||||||
/// Upgrades the room.
|
|
||||||
///
|
|
||||||
/// - Creates a replacement room
|
|
||||||
/// - Sends a tombstone event into the current room
|
|
||||||
/// - Sender user joins the room
|
|
||||||
/// - Transfers some state events
|
|
||||||
/// - Moves local aliases
|
|
||||||
/// - Modifies old room power levels to prevent users from speaking
|
|
||||||
pub async fn upgrade_room_route(
|
|
||||||
body: Ruma<upgrade_room::v3::Request>,
|
|
||||||
) -> Result<upgrade_room::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.globals
|
|
||||||
.supported_room_versions()
|
|
||||||
.contains(&body.new_version)
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::UnsupportedRoomVersion,
|
|
||||||
"This server does not support that room version.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a replacement room
|
|
||||||
let replacement_room = RoomId::new(services().globals.server_name());
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.short
|
|
||||||
.get_or_create_shortroomid(&replacement_room)?;
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
services()
|
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
|
||||||
.write()
|
|
||||||
.await
|
|
||||||
.entry(body.room_id.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let state_lock = mutex_state.lock().await;
|
|
||||||
|
|
||||||
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
|
||||||
// Fail if the sender does not have the required permissions
|
|
||||||
let tombstone_event_id = services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomTombstone,
|
|
||||||
content: to_raw_value(&RoomTombstoneEventContent {
|
|
||||||
body: "This room has been replaced".to_owned(),
|
|
||||||
replacement_room: replacement_room.clone(),
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// Change lock to replacement room
|
|
||||||
drop(state_lock);
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
services()
|
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
|
||||||
.write()
|
|
||||||
.await
|
|
||||||
.entry(replacement_room.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let state_lock = mutex_state.lock().await;
|
|
||||||
|
|
||||||
// Get the old room creation event
|
|
||||||
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?
|
|
||||||
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
|
||||||
.content
|
|
||||||
.get(),
|
|
||||||
)
|
|
||||||
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
|
|
||||||
|
|
||||||
// Use the m.room.tombstone event as the predecessor
|
|
||||||
let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
|
|
||||||
body.room_id.clone(),
|
|
||||||
(*tombstone_event_id).to_owned(),
|
|
||||||
));
|
|
||||||
|
|
||||||
// Send a m.room.create event containing a predecessor field and the applicable room_version
|
|
||||||
match body.new_version {
|
|
||||||
RoomVersionId::V1
|
|
||||||
| RoomVersionId::V2
|
|
||||||
| RoomVersionId::V3
|
|
||||||
| RoomVersionId::V4
|
|
||||||
| RoomVersionId::V5
|
|
||||||
| RoomVersionId::V6
|
|
||||||
| RoomVersionId::V7
|
|
||||||
| RoomVersionId::V8
|
|
||||||
| RoomVersionId::V9
|
|
||||||
| RoomVersionId::V10 => {
|
|
||||||
create_event_content.insert(
|
|
||||||
"creator".into(),
|
|
||||||
json!(&sender_user).try_into().map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::BadJson, "Error forming creation event")
|
|
||||||
})?,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
RoomVersionId::V11 => {
|
|
||||||
// "creator" key no longer exists in V11 rooms
|
|
||||||
create_event_content.remove("creator");
|
|
||||||
}
|
|
||||||
_ => unreachable!("Validity of room version already checked"),
|
|
||||||
}
|
|
||||||
create_event_content.insert(
|
|
||||||
"room_version".into(),
|
|
||||||
json!(&body.new_version)
|
|
||||||
.try_into()
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
|
|
||||||
);
|
|
||||||
create_event_content.insert(
|
|
||||||
"predecessor".into(),
|
|
||||||
json!(predecessor)
|
|
||||||
.try_into()
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
|
|
||||||
);
|
|
||||||
|
|
||||||
// Validate creation event content
|
|
||||||
let de_result = serde_json::from_str::<CanonicalJsonObject>(
|
|
||||||
to_raw_value(&create_event_content)
|
|
||||||
.expect("Error forming creation event")
|
|
||||||
.get(),
|
|
||||||
);
|
|
||||||
|
|
||||||
if de_result.is_err() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::BadJson,
|
|
||||||
"Error forming creation event",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomCreate,
|
|
||||||
content: to_raw_value(&create_event_content)
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&replacement_room,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// Join the new room
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomMember,
|
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Join,
|
|
||||||
displayname: services().users.displayname(sender_user)?,
|
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
|
||||||
is_direct: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
blurhash: services().users.blurhash(sender_user)?,
|
|
||||||
reason: None,
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(sender_user.to_string()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&replacement_room,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// Recommended transferable state events list from the specs
|
|
||||||
let transferable_state_events = vec![
|
|
||||||
StateEventType::RoomServerAcl,
|
|
||||||
StateEventType::RoomEncryption,
|
|
||||||
StateEventType::RoomName,
|
|
||||||
StateEventType::RoomAvatar,
|
|
||||||
StateEventType::RoomTopic,
|
|
||||||
StateEventType::RoomGuestAccess,
|
|
||||||
StateEventType::RoomHistoryVisibility,
|
|
||||||
StateEventType::RoomJoinRules,
|
|
||||||
StateEventType::RoomPowerLevels,
|
|
||||||
];
|
|
||||||
|
|
||||||
// Replicate transferable state events to the new room
|
|
||||||
for event_type in transferable_state_events {
|
|
||||||
let event_content =
|
|
||||||
match services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &event_type, "")?
|
|
||||||
{
|
|
||||||
Some(v) => v.content.clone(),
|
|
||||||
None => continue, // Skipping missing events.
|
|
||||||
};
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: event_type.to_string().into(),
|
|
||||||
content: event_content,
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&replacement_room,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Moves any local aliases to the new room
|
|
||||||
for alias in services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.local_aliases_for_room(&body.room_id)
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
{
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.set_alias(&alias, &replacement_room, sender_user)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the old room power levels
|
|
||||||
let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")?
|
|
||||||
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
|
||||||
.content
|
|
||||||
.get(),
|
|
||||||
)
|
|
||||||
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
|
|
||||||
|
|
||||||
// Setting events_default and invite to the greater of 50 and users_default + 1
|
|
||||||
let new_level = max(int!(50), power_levels_event_content.users_default + int!(1));
|
|
||||||
power_levels_event_content.events_default = new_level;
|
|
||||||
power_levels_event_content.invite = new_level;
|
|
||||||
|
|
||||||
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
|
||||||
let _ = services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: TimelineEventType::RoomPowerLevels,
|
|
||||||
content: to_raw_value(&power_levels_event_content)
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
timestamp: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
drop(state_lock);
|
|
||||||
|
|
||||||
// Return the replacement room id
|
|
||||||
Ok(upgrade_room::v3::Response { replacement_room })
|
|
||||||
}
|
|
|
@ -1,279 +0,0 @@
|
||||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
|
||||||
use crate::{services, utils, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
session::{get_login_types, login, logout, logout_all},
|
|
||||||
uiaa::UserIdentifier,
|
|
||||||
},
|
|
||||||
UserId,
|
|
||||||
};
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tracing::{info, warn};
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize)]
|
|
||||||
struct Claims {
|
|
||||||
sub: String,
|
|
||||||
//exp: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/login`
|
|
||||||
///
|
|
||||||
/// Get the supported login types of this server. One of these should be used as the `type` field
|
|
||||||
/// when logging in.
|
|
||||||
pub async fn get_login_types_route(
|
|
||||||
_body: Ruma<get_login_types::v3::Request>,
|
|
||||||
) -> Result<get_login_types::v3::Response> {
|
|
||||||
Ok(get_login_types::v3::Response::new(vec![
|
|
||||||
get_login_types::v3::LoginType::Password(Default::default()),
|
|
||||||
get_login_types::v3::LoginType::ApplicationService(Default::default()),
|
|
||||||
]))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/login`
|
|
||||||
///
|
|
||||||
/// Authenticates the user and returns an access token it can use in subsequent requests.
|
|
||||||
///
|
|
||||||
/// - The user needs to authenticate using their password (or if enabled using a json web token)
|
|
||||||
/// - If `device_id` is known: invalidates old access token of that device
|
|
||||||
/// - If `device_id` is unknown: creates a new device
|
|
||||||
/// - Returns access token that is associated with the user and device
|
|
||||||
///
|
|
||||||
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
|
||||||
/// supported login types.
|
|
||||||
pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
|
|
||||||
// To allow deprecated login methods
|
|
||||||
#![allow(deprecated)]
|
|
||||||
// Validate login method
|
|
||||||
// TODO: Other login methods
|
|
||||||
let user_id = match &body.login_info {
|
|
||||||
login::v3::LoginInfo::Password(login::v3::Password {
|
|
||||||
identifier,
|
|
||||||
password,
|
|
||||||
user,
|
|
||||||
address: _,
|
|
||||||
medium: _,
|
|
||||||
}) => {
|
|
||||||
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
|
||||||
UserId::parse_with_server_name(
|
|
||||||
user_id.to_lowercase(),
|
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
} else if let Some(user) = user {
|
|
||||||
UserId::parse(user)
|
|
||||||
} else {
|
|
||||||
warn!("Bad login type: {:?}", &body.login_info);
|
|
||||||
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
|
|
||||||
}
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
|
|
||||||
|
|
||||||
if services().appservice.is_exclusive_user_id(&user_id).await {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"User id reserved by appservice.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let hash = services()
|
|
||||||
.users
|
|
||||||
.password_hash(&user_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"Wrong username or password.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
if hash.is_empty() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::UserDeactivated,
|
|
||||||
"The user has been deactivated",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false);
|
|
||||||
|
|
||||||
if !hash_matches {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"Wrong username or password.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
user_id
|
|
||||||
}
|
|
||||||
login::v3::LoginInfo::Token(login::v3::Token { token }) => {
|
|
||||||
if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() {
|
|
||||||
let token = jsonwebtoken::decode::<Claims>(
|
|
||||||
token,
|
|
||||||
jwt_decoding_key,
|
|
||||||
&jsonwebtoken::Validation::default(),
|
|
||||||
)
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
|
|
||||||
let username = token.claims.sub.to_lowercase();
|
|
||||||
let user_id =
|
|
||||||
UserId::parse_with_server_name(username, services().globals.server_name())
|
|
||||||
.map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if services().appservice.is_exclusive_user_id(&user_id).await {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"User id reserved by appservice.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
user_id
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Unknown,
|
|
||||||
"Token login is not supported (server has no jwt decoding key).",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
|
|
||||||
identifier,
|
|
||||||
user,
|
|
||||||
}) => {
|
|
||||||
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
|
||||||
UserId::parse_with_server_name(
|
|
||||||
user_id.to_lowercase(),
|
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
} else if let Some(user) = user {
|
|
||||||
UserId::parse(user)
|
|
||||||
} else {
|
|
||||||
warn!("Bad login type: {:?}", &body.login_info);
|
|
||||||
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
|
|
||||||
}
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
|
|
||||||
|
|
||||||
if let Some(ref info) = body.appservice_info {
|
|
||||||
if !info.is_user_match(&user_id) {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"User is not in namespace.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::MissingToken,
|
|
||||||
"Missing appservice token.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
user_id
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
warn!("Unsupported or unknown login type: {:?}", &body.login_info);
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Unknown,
|
|
||||||
"Unsupported login type.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Generate new device id if the user didn't specify one
|
|
||||||
let device_id = body
|
|
||||||
.device_id
|
|
||||||
.clone()
|
|
||||||
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
|
|
||||||
|
|
||||||
// Generate a new token for the device
|
|
||||||
let token = utils::random_string(TOKEN_LENGTH);
|
|
||||||
|
|
||||||
// Determine if device_id was provided and exists in the db for this user
|
|
||||||
let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.all_device_ids(&user_id)
|
|
||||||
.any(|x| x.as_ref().map_or(false, |v| v == device_id))
|
|
||||||
});
|
|
||||||
|
|
||||||
if device_exists {
|
|
||||||
services().users.set_token(&user_id, &device_id, &token)?;
|
|
||||||
} else {
|
|
||||||
services().users.create_device(
|
|
||||||
&user_id,
|
|
||||||
&device_id,
|
|
||||||
&token,
|
|
||||||
body.initial_device_display_name.clone(),
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("{} logged in", user_id);
|
|
||||||
|
|
||||||
// Homeservers are still required to send the `home_server` field
|
|
||||||
#[allow(deprecated)]
|
|
||||||
Ok(login::v3::Response {
|
|
||||||
user_id,
|
|
||||||
access_token: token,
|
|
||||||
home_server: Some(services().globals.server_name().to_owned()),
|
|
||||||
device_id,
|
|
||||||
well_known: None,
|
|
||||||
refresh_token: None,
|
|
||||||
expires_in: None,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/logout`
|
|
||||||
///
|
|
||||||
/// Log out the current device.
|
|
||||||
///
|
|
||||||
/// - Invalidates access token
|
|
||||||
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
|
||||||
/// - Forgets to-device events
|
|
||||||
/// - Triggers device list updates
|
|
||||||
pub async fn logout_route(body: Ruma<logout::v3::Request>) -> Result<logout::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if let Some(ref info) = body.appservice_info {
|
|
||||||
if !info.is_user_match(sender_user) {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"User is not in namespace.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
services().users.remove_device(sender_user, sender_device)?;
|
|
||||||
|
|
||||||
Ok(logout::v3::Response::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/logout/all`
|
|
||||||
///
|
|
||||||
/// Log out all devices of this user.
|
|
||||||
///
|
|
||||||
/// - Invalidates all access tokens
|
|
||||||
/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
|
|
||||||
/// - Forgets all to-device events
|
|
||||||
/// - Triggers device list updates
|
|
||||||
///
|
|
||||||
/// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html)
|
|
||||||
/// from each device of this user.
|
|
||||||
pub async fn logout_all_route(
|
|
||||||
body: Ruma<logout_all::v3::Request>,
|
|
||||||
) -> Result<logout_all::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if let Some(ref info) = body.appservice_info {
|
|
||||||
if !info.is_user_match(sender_user) {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Exclusive,
|
|
||||||
"User is not in namespace.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::MissingToken,
|
|
||||||
"Missing appservice token.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
for device_id in services().users.all_device_ids(sender_user).flatten() {
|
|
||||||
services().users.remove_device(sender_user, &device_id)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(logout_all::v3::Response::new())
|
|
||||||
}
|
|
|
@ -1,34 +0,0 @@
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
use ruma::api::client::space::get_hierarchy;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy``
|
|
||||||
///
|
|
||||||
/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space.
|
|
||||||
pub async fn get_hierarchy_route(
|
|
||||||
body: Ruma<get_hierarchy::v1::Request>,
|
|
||||||
) -> Result<get_hierarchy::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let skip = body
|
|
||||||
.from
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|s| s.parse::<usize>().ok())
|
|
||||||
.unwrap_or(0);
|
|
||||||
|
|
||||||
let limit = body.limit.map_or(10, u64::from).min(100) as usize;
|
|
||||||
|
|
||||||
let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.spaces
|
|
||||||
.get_hierarchy(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
limit,
|
|
||||||
skip,
|
|
||||||
max_depth,
|
|
||||||
body.suggested_only,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
|
@ -1,266 +0,0 @@
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma, RumaResponse};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
state::{get_state_events, get_state_events_for_key, send_state_event},
|
|
||||||
},
|
|
||||||
events::{
|
|
||||||
room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
|
|
||||||
},
|
|
||||||
serde::Raw,
|
|
||||||
EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId,
|
|
||||||
};
|
|
||||||
use tracing::log::warn;
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`
|
|
||||||
///
|
|
||||||
/// Sends a state event into the room.
|
|
||||||
///
|
|
||||||
/// - The only requirement for the content is that it has to be valid json
|
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
|
||||||
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
|
||||||
pub async fn send_state_event_for_key_route(
|
|
||||||
body: Ruma<send_state_event::v3::Request>,
|
|
||||||
) -> Result<send_state_event::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event_id = send_state_event_for_key_helper(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_type,
|
|
||||||
&body.body.body, // Yes, I hate it too
|
|
||||||
body.state_key.to_owned(),
|
|
||||||
if body.appservice_info.is_some() {
|
|
||||||
body.timestamp
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
|
||||||
Ok(send_state_event::v3::Response { event_id })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}`
|
|
||||||
///
|
|
||||||
/// Sends a state event into the room.
|
|
||||||
///
|
|
||||||
/// - The only requirement for the content is that it has to be valid json
|
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
|
||||||
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
|
||||||
pub async fn send_state_event_for_empty_key_route(
|
|
||||||
body: Ruma<send_state_event::v3::Request>,
|
|
||||||
) -> Result<RumaResponse<send_state_event::v3::Response>> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
// Forbid m.room.encryption if encryption is disabled
|
|
||||||
if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"Encryption has been disabled",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event_id = send_state_event_for_key_helper(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_type.to_string().into(),
|
|
||||||
&body.body.body,
|
|
||||||
body.state_key.to_owned(),
|
|
||||||
if body.appservice_info.is_some() {
|
|
||||||
body.timestamp
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
|
||||||
Ok(send_state_event::v3::Response { event_id }.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomid}/state`
|
|
||||||
///
|
|
||||||
/// Get all state events for a room.
|
|
||||||
///
|
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
|
||||||
pub async fn get_state_events_route(
|
|
||||||
body: Ruma<get_state_events::v3::Request>,
|
|
||||||
) -> Result<get_state_events::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_state_events(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"You don't have permission to view the room state.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(get_state_events::v3::Response {
|
|
||||||
room_state: services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_full(&body.room_id)
|
|
||||||
.await?
|
|
||||||
.values()
|
|
||||||
.map(|pdu| pdu.to_state_event())
|
|
||||||
.collect(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}`
|
|
||||||
///
|
|
||||||
/// Get single state event of a room.
|
|
||||||
///
|
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
|
||||||
pub async fn get_state_events_for_key_route(
|
|
||||||
body: Ruma<get_state_events_for_key::v3::Request>,
|
|
||||||
) -> Result<get_state_events_for_key::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_state_events(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"You don't have permission to view the room state.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &body.event_type, &body.state_key)?
|
|
||||||
.ok_or_else(|| {
|
|
||||||
warn!(
|
|
||||||
"State event {:?} not found in room {:?}",
|
|
||||||
&body.event_type, &body.room_id
|
|
||||||
);
|
|
||||||
Error::BadRequest(ErrorKind::NotFound, "State event not found.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(get_state_events_for_key::v3::Response {
|
|
||||||
content: serde_json::from_str(event.content.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid event content in database"))?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}`
|
|
||||||
///
|
|
||||||
/// Get single state event of a room.
|
|
||||||
///
|
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
|
||||||
pub async fn get_state_events_for_empty_key_route(
|
|
||||||
body: Ruma<get_state_events_for_key::v3::Request>,
|
|
||||||
) -> Result<RumaResponse<get_state_events_for_key::v3::Response>> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_state_events(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"You don't have permission to view the room state.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &body.event_type, "")?
|
|
||||||
.ok_or_else(|| {
|
|
||||||
warn!(
|
|
||||||
"State event {:?} not found in room {:?}",
|
|
||||||
&body.event_type, &body.room_id
|
|
||||||
);
|
|
||||||
Error::BadRequest(ErrorKind::NotFound, "State event not found.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(get_state_events_for_key::v3::Response {
|
|
||||||
content: serde_json::from_str(event.content.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid event content in database"))?,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn send_state_event_for_key_helper(
|
|
||||||
sender: &UserId,
|
|
||||||
room_id: &RoomId,
|
|
||||||
event_type: &StateEventType,
|
|
||||||
json: &Raw<AnyStateEventContent>,
|
|
||||||
state_key: String,
|
|
||||||
timestamp: Option<MilliSecondsSinceUnixEpoch>,
|
|
||||||
) -> Result<Arc<EventId>> {
|
|
||||||
let sender_user = sender;
|
|
||||||
|
|
||||||
// TODO: Review this check, error if event is unparsable, use event type, allow alias if it
|
|
||||||
// previously existed
|
|
||||||
if let Ok(canonical_alias) =
|
|
||||||
serde_json::from_str::<RoomCanonicalAliasEventContent>(json.json().get())
|
|
||||||
{
|
|
||||||
let mut aliases = canonical_alias.alt_aliases.clone();
|
|
||||||
|
|
||||||
if let Some(alias) = canonical_alias.alias {
|
|
||||||
aliases.push(alias);
|
|
||||||
}
|
|
||||||
|
|
||||||
for alias in aliases {
|
|
||||||
if alias.server_name() != services().globals.server_name()
|
|
||||||
|| services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_local_alias(&alias)?
|
|
||||||
.filter(|room| room == room_id) // Make sure it's the right room
|
|
||||||
.is_none()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"You are only allowed to send canonical_alias \
|
|
||||||
events when it's aliases already exists",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
services()
|
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
|
||||||
.write()
|
|
||||||
.await
|
|
||||||
.entry(room_id.to_owned())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let state_lock = mutex_state.lock().await;
|
|
||||||
|
|
||||||
let event_id = services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: event_type.to_string().into(),
|
|
||||||
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(state_key),
|
|
||||||
redacts: None,
|
|
||||||
timestamp,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(event_id)
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,126 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::tag::{create_tag, delete_tag, get_tags},
|
|
||||||
events::{
|
|
||||||
tag::{TagEvent, TagEventContent},
|
|
||||||
RoomAccountDataEventType,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Adds a tag to the room.
|
|
||||||
///
|
|
||||||
/// - Inserts the tag into the tag event of the room account data.
|
|
||||||
pub async fn update_tag_route(
|
|
||||||
body: Ruma<create_tag::v3::Request>,
|
|
||||||
) -> Result<create_tag::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services().account_data.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut tags_event = event
|
|
||||||
.map(|e| {
|
|
||||||
serde_json::from_str(e.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
|
||||||
})
|
|
||||||
.unwrap_or_else(|| {
|
|
||||||
Ok(TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
})?;
|
|
||||||
|
|
||||||
tags_event
|
|
||||||
.content
|
|
||||||
.tags
|
|
||||||
.insert(body.tag.clone().into(), body.tag_info.clone());
|
|
||||||
|
|
||||||
services().account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
&serde_json::to_value(tags_event).expect("to json value always works"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(create_tag::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Deletes a tag from the room.
|
|
||||||
///
|
|
||||||
/// - Removes the tag from the tag event of the room account data.
|
|
||||||
pub async fn delete_tag_route(
|
|
||||||
body: Ruma<delete_tag::v3::Request>,
|
|
||||||
) -> Result<delete_tag::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services().account_data.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut tags_event = event
|
|
||||||
.map(|e| {
|
|
||||||
serde_json::from_str(e.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
|
||||||
})
|
|
||||||
.unwrap_or_else(|| {
|
|
||||||
Ok(TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
})?;
|
|
||||||
|
|
||||||
tags_event.content.tags.remove(&body.tag.clone().into());
|
|
||||||
|
|
||||||
services().account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
&serde_json::to_value(tags_event).expect("to json value always works"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(delete_tag::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
|
|
||||||
///
|
|
||||||
/// Returns tags on the room.
|
|
||||||
///
|
|
||||||
/// - Gets the tag event of the room account data.
|
|
||||||
pub async fn get_tags_route(body: Ruma<get_tags::v3::Request>) -> Result<get_tags::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services().account_data.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let tags_event = event
|
|
||||||
.map(|e| {
|
|
||||||
serde_json::from_str(e.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
|
||||||
})
|
|
||||||
.unwrap_or_else(|| {
|
|
||||||
Ok(TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(get_tags::v3::Response {
|
|
||||||
tags: tags_event.content.tags,
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,16 +0,0 @@
|
||||||
use crate::{Result, Ruma};
|
|
||||||
use ruma::api::client::thirdparty::get_protocols;
|
|
||||||
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/thirdparty/protocols`
|
|
||||||
///
|
|
||||||
/// TODO: Fetches all metadata about protocols supported by the homeserver.
|
|
||||||
pub async fn get_protocols_route(
|
|
||||||
_body: Ruma<get_protocols::v3::Request>,
|
|
||||||
) -> Result<get_protocols::v3::Response> {
|
|
||||||
// TODO
|
|
||||||
Ok(get_protocols::v3::Response {
|
|
||||||
protocols: BTreeMap::new(),
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,49 +0,0 @@
|
||||||
use ruma::api::client::{error::ErrorKind, threads::get_threads};
|
|
||||||
|
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/threads`
|
|
||||||
pub async fn get_threads_route(
|
|
||||||
body: Ruma<get_threads::v1::Request>,
|
|
||||||
) -> Result<get_threads::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
|
||||||
let limit = body
|
|
||||||
.limit
|
|
||||||
.and_then(|l| l.try_into().ok())
|
|
||||||
.unwrap_or(10)
|
|
||||||
.min(100);
|
|
||||||
|
|
||||||
let from = if let Some(from) = &body.from {
|
|
||||||
from.parse()
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, ""))?
|
|
||||||
} else {
|
|
||||||
u64::MAX
|
|
||||||
};
|
|
||||||
|
|
||||||
let threads = services()
|
|
||||||
.rooms
|
|
||||||
.threads
|
|
||||||
.threads_until(sender_user, &body.room_id, from, &body.include)?
|
|
||||||
.take(limit)
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.filter(|(_, pdu)| {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
|
|
||||||
.unwrap_or(false)
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let next_batch = threads.last().map(|(count, _)| count.to_string());
|
|
||||||
|
|
||||||
Ok(get_threads::v1::Response {
|
|
||||||
chunk: threads
|
|
||||||
.into_iter()
|
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
|
||||||
.collect(),
|
|
||||||
next_batch,
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,46 +0,0 @@
|
||||||
use crate::{services, utils, Error, Result, Ruma};
|
|
||||||
use ruma::api::client::{error::ErrorKind, typing::create_typing_event};
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
|
|
||||||
///
|
|
||||||
/// Sets the typing state of the sender user.
|
|
||||||
pub async fn create_typing_event_route(
|
|
||||||
body: Ruma<create_typing_event::v3::Request>,
|
|
||||||
) -> Result<create_typing_event::v3::Response> {
|
|
||||||
use create_typing_event::v3::Typing;
|
|
||||||
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_joined(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::forbidden(),
|
|
||||||
"You are not in this room.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Typing::Yes(duration) = body.state {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.edus
|
|
||||||
.typing
|
|
||||||
.typing_add(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
} else {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.edus
|
|
||||||
.typing
|
|
||||||
.typing_remove(sender_user, &body.room_id)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(create_typing_event::v3::Response {})
|
|
||||||
}
|
|
|
@ -1,37 +0,0 @@
|
||||||
use std::{collections::BTreeMap, iter::FromIterator};
|
|
||||||
|
|
||||||
use ruma::api::client::discovery::get_supported_versions;
|
|
||||||
|
|
||||||
use crate::{Result, Ruma};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/versions`
|
|
||||||
///
|
|
||||||
/// Get the versions of the specification and unstable features supported by this server.
|
|
||||||
///
|
|
||||||
/// - Versions take the form MAJOR.MINOR.PATCH
|
|
||||||
/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value
|
|
||||||
/// - Unstable features are namespaced and may include version information in their name
|
|
||||||
///
|
|
||||||
/// Note: Unstable features are used while developing new features. Clients should avoid using
|
|
||||||
/// unstable features in their stable releases
|
|
||||||
pub async fn get_supported_versions_route(
|
|
||||||
_body: Ruma<get_supported_versions::Request>,
|
|
||||||
) -> Result<get_supported_versions::Response> {
|
|
||||||
let resp = get_supported_versions::Response {
|
|
||||||
versions: vec![
|
|
||||||
"r0.5.0".to_owned(),
|
|
||||||
"r0.6.0".to_owned(),
|
|
||||||
"v1.1".to_owned(),
|
|
||||||
"v1.2".to_owned(),
|
|
||||||
"v1.3".to_owned(),
|
|
||||||
"v1.4".to_owned(),
|
|
||||||
"v1.5".to_owned(),
|
|
||||||
],
|
|
||||||
unstable_features: BTreeMap::from_iter([
|
|
||||||
("org.matrix.e2e_cross_signing".to_owned(), true),
|
|
||||||
("org.matrix.msc3916.stable".to_owned(), true),
|
|
||||||
]),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(resp)
|
|
||||||
}
|
|
|
@ -1,101 +0,0 @@
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::user_directory::search_users,
|
|
||||||
events::{
|
|
||||||
room::join_rules::{JoinRule, RoomJoinRulesEventContent},
|
|
||||||
StateEventType,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/user_directory/search`
///
/// Searches all known users for a match.
///
/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public)
/// and don't share a room with the sender
pub async fn search_users_route(
    body: Ruma<search_users::v3::Request>,
) -> Result<search_users::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let limit = u64::from(body.limit) as usize;

    // Lazy iterator over all matching, visible users; any `?` inside the
    // closure skips that user (filter_map yields None).
    let mut users = services().users.iter().filter_map(|user_id| {
        // Filter out buggy users (they should not exist, but you never know...)
        let user_id = user_id.ok()?;

        let user = search_users::v3::User {
            user_id: user_id.clone(),
            display_name: services().users.displayname(&user_id).ok()?,
            avatar_url: services().users.avatar_url(&user_id).ok()?,
        };

        // Case-insensitive substring match against the full user ID...
        let user_id_matches = user
            .user_id
            .to_string()
            .to_lowercase()
            .contains(&body.search_term.to_lowercase());

        // ...and against the display name, when one is set.
        let user_displayname_matches = user
            .display_name
            .as_ref()
            .filter(|name| {
                name.to_lowercase()
                    .contains(&body.search_term.to_lowercase())
            })
            .is_some();

        if !user_id_matches && !user_displayname_matches {
            return None;
        }

        // It's a matching user, but is the sender allowed to see them?
        let mut user_visible = false;

        // Visible if the user is joined to at least one room whose
        // m.room.join_rules state is `public`.
        let user_is_in_public_rooms = services()
            .rooms
            .state_cache
            .rooms_joined(&user_id)
            .filter_map(|r| r.ok())
            .any(|room| {
                services()
                    .rooms
                    .state_accessor
                    .room_state_get(&room, &StateEventType::RoomJoinRules, "")
                    .map_or(false, |event| {
                        event.map_or(false, |event| {
                            serde_json::from_str(event.content.get())
                                .map_or(false, |r: RoomJoinRulesEventContent| {
                                    r.join_rule == JoinRule::Public
                                })
                        })
                    })
            });

        if user_is_in_public_rooms {
            user_visible = true;
        } else {
            // Otherwise, visible only if the sender shares at least one room
            // with them.
            let user_is_in_shared_rooms = services()
                .rooms
                .user
                .get_shared_rooms(vec![sender_user.clone(), user_id])
                .ok()?
                .next()
                .is_some();

            if user_is_in_shared_rooms {
                user_visible = true;
            }
        }

        if !user_visible {
            return None;
        }

        Some(user)
    });

    // Take one page of results; if any user remains after the page, the
    // response is marked as limited.
    let results = users.by_ref().take(limit).collect();
    let limited = users.next().is_some();

    Ok(search_users::v3::Response { results, limited })
}
|
|
|
@ -1,48 +0,0 @@
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
use base64::{engine::general_purpose, Engine as _};
|
|
||||||
use hmac::{Hmac, Mac};
|
|
||||||
use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch};
|
|
||||||
use sha1::Sha1;
|
|
||||||
use std::time::{Duration, SystemTime};
|
|
||||||
|
|
||||||
type HmacSha1 = Hmac<Sha1>;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/voip/turnServer`
|
|
||||||
///
|
|
||||||
/// TODO: Returns information about the recommended turn server.
|
|
||||||
pub async fn turn_server_route(
|
|
||||||
body: Ruma<get_turn_server_info::v3::Request>,
|
|
||||||
) -> Result<get_turn_server_info::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let turn_secret = services().globals.turn_secret().clone();
|
|
||||||
|
|
||||||
let (username, password) = if !turn_secret.is_empty() {
|
|
||||||
let expiry = SecondsSinceUnixEpoch::from_system_time(
|
|
||||||
SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()),
|
|
||||||
)
|
|
||||||
.expect("time is valid");
|
|
||||||
|
|
||||||
let username: String = format!("{}:{}", expiry.get(), sender_user);
|
|
||||||
|
|
||||||
let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
|
|
||||||
.expect("HMAC can take key of any size");
|
|
||||||
mac.update(username.as_bytes());
|
|
||||||
|
|
||||||
let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes());
|
|
||||||
|
|
||||||
(username, password)
|
|
||||||
} else {
|
|
||||||
(
|
|
||||||
services().globals.turn_username().clone(),
|
|
||||||
services().globals.turn_password().clone(),
|
|
||||||
)
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(get_turn_server_info::v3::Response {
|
|
||||||
username,
|
|
||||||
password,
|
|
||||||
uris: services().globals.turn_uris().to_vec(),
|
|
||||||
ttl: Duration::from_secs(services().globals.turn_ttl()),
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,22 +0,0 @@
|
||||||
use ruma::api::client::discovery::discover_homeserver::{
|
|
||||||
self, HomeserverInfo, SlidingSyncProxyInfo,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
|
|
||||||
/// # `GET /.well-known/matrix/client`
|
|
||||||
///
|
|
||||||
/// Returns the client server discovery information.
|
|
||||||
pub async fn well_known_client(
|
|
||||||
_body: Ruma<discover_homeserver::Request>,
|
|
||||||
) -> Result<discover_homeserver::Response> {
|
|
||||||
let client_url = services().globals.well_known_client();
|
|
||||||
|
|
||||||
Ok(discover_homeserver::Response {
|
|
||||||
homeserver: HomeserverInfo {
|
|
||||||
base_url: client_url.clone(),
|
|
||||||
},
|
|
||||||
identity_server: None,
|
|
||||||
sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }),
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,4 +0,0 @@
|
||||||
// API layer modules.
pub mod appservice_server; // outgoing requests to application services
pub mod client_server; // presumably client-server API handlers — confirm against module contents
pub mod ruma_wrapper; // axum <-> ruma request/response extraction glue
pub mod server_server; // presumably federation (server-server) handlers — confirm against module contents
|
|
|
@ -1,369 +0,0 @@
|
||||||
use std::{collections::BTreeMap, iter::FromIterator, str};
|
|
||||||
|
|
||||||
use axum::{
|
|
||||||
async_trait,
|
|
||||||
body::Body,
|
|
||||||
extract::{FromRequest, Path},
|
|
||||||
response::{IntoResponse, Response},
|
|
||||||
RequestExt, RequestPartsExt,
|
|
||||||
};
|
|
||||||
use axum_extra::{
|
|
||||||
headers::{authorization::Bearer, Authorization},
|
|
||||||
typed_header::TypedHeaderRejectionReason,
|
|
||||||
TypedHeader,
|
|
||||||
};
|
|
||||||
use bytes::{BufMut, BytesMut};
|
|
||||||
use http::{Request, StatusCode};
|
|
||||||
use ruma::{
|
|
||||||
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
|
|
||||||
server_util::authorization::XMatrix,
|
|
||||||
CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId,
|
|
||||||
};
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tracing::{debug, error, warn};
|
|
||||||
|
|
||||||
use super::{Ruma, RumaResponse};
|
|
||||||
use crate::{service::appservice::RegistrationInfo, services, Error, Result};
|
|
||||||
|
|
||||||
/// Result of resolving the access token attached to an incoming request.
enum Token {
    /// The token belongs to a registered application service.
    Appservice(Box<RegistrationInfo>),
    /// The token belongs to a logged-in user/device pair.
    User((OwnedUserId, OwnedDeviceId)),
    /// A token was supplied but matched neither an appservice nor a user.
    Invalid,
    /// No token was supplied at all.
    None,
}
|
|
||||||
|
|
||||||
// Extracts a typed ruma request from an incoming axum request: reads the
// (size-limited) body, resolves the access token or X-Matrix signature into a
// sender identity according to the endpoint's auth scheme, merges any stored
// UIAA request body back in, and finally deserializes into `T`.
#[async_trait]
impl<T, S> FromRequest<S> for Ruma<T>
where
    T: IncomingRequest,
{
    type Rejection = Error;

    async fn from_request(req: Request<Body>, _state: &S) -> Result<Self, Self::Rejection> {
        // Query parameters recognized for authentication purposes.
        #[derive(Deserialize)]
        struct QueryParams {
            access_token: Option<String>,
            user_id: Option<String>,
        }

        // Read the whole body up front, enforcing the configured size limit.
        let (mut parts, mut body) = {
            let limited_req = req.with_limited_body();
            let (parts, body) = limited_req.into_parts();
            let body = axum::body::to_bytes(
                body,
                services()
                    .globals
                    .max_request_size()
                    .try_into()
                    .unwrap_or(usize::MAX),
            )
            .await
            .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
            (parts, body)
        };

        let metadata = T::METADATA;
        let auth_header: Option<TypedHeader<Authorization<Bearer>>> = parts.extract().await?;
        let path_params: Path<Vec<String>> = parts.extract().await?;

        let query = parts.uri.query().unwrap_or_default();
        let query_params: QueryParams = match serde_html_form::from_str(query) {
            Ok(params) => params,
            Err(e) => {
                error!(%query, "Failed to deserialize query parameters: {}", e);
                return Err(Error::BadRequest(
                    ErrorKind::Unknown,
                    "Failed to read query parameters",
                ));
            }
        };

        // Prefer the Authorization header; fall back to ?access_token=.
        let token = match &auth_header {
            Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()),
            None => query_params.access_token.as_deref(),
        };

        // Classify the token: appservice, user, invalid, or absent.
        let token = if let Some(token) = token {
            if let Some(reg_info) = services().appservice.find_from_token(token).await {
                Token::Appservice(Box::new(reg_info.clone()))
            } else if let Some((user_id, device_id)) = services().users.find_from_token(token)? {
                Token::User((user_id, OwnedDeviceId::from(device_id)))
            } else {
                Token::Invalid
            }
        } else {
            Token::None
        };

        // Keep the parsed JSON body around; None when the body is not valid JSON.
        let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();

        // Resolve (endpoint auth scheme, token kind) into the sender identity.
        let (sender_user, sender_device, sender_servername, appservice_info) =
            match (metadata.authentication, token) {
                (_, Token::Invalid) => {
                    // OpenID endpoint uses a query param with the same name, drop this once query params for user auth are removed from the spec
                    if query_params.access_token.is_some() {
                        (None, None, None, None)
                    } else {
                        return Err(Error::BadRequest(
                            ErrorKind::UnknownToken { soft_logout: false },
                            "Unknown access token.",
                        ));
                    }
                }
                (AuthScheme::AccessToken, Token::Appservice(info)) => {
                    // Appservices may act as any user in their namespace via
                    // ?user_id=; default to their sender_localpart.
                    let user_id = query_params
                        .user_id
                        .map_or_else(
                            || {
                                UserId::parse_with_server_name(
                                    info.registration.sender_localpart.as_str(),
                                    services().globals.server_name(),
                                )
                            },
                            UserId::parse,
                        )
                        .map_err(|_| {
                            Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
                        })?;

                    if !info.is_user_match(&user_id) {
                        return Err(Error::BadRequest(
                            ErrorKind::Exclusive,
                            "User is not in namespace.",
                        ));
                    }

                    if !services().users.exists(&user_id)? {
                        return Err(Error::BadRequest(
                            ErrorKind::forbidden(),
                            "User does not exist.",
                        ));
                    }

                    (Some(user_id), None, None, Some(*info))
                }
                (
                    AuthScheme::None
                    | AuthScheme::AppserviceToken
                    | AuthScheme::AccessTokenOptional,
                    Token::Appservice(info),
                ) => (None, None, None, Some(*info)),
                (AuthScheme::AccessToken, Token::None) => {
                    return Err(Error::BadRequest(
                        ErrorKind::MissingToken,
                        "Missing access token.",
                    ));
                }
                (
                    AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None,
                    Token::User((user_id, device_id)),
                ) => (Some(user_id), Some(device_id), None, None),
                // Federation endpoint: authenticate via the X-Matrix signature.
                (AuthScheme::ServerSignatures, Token::None) => {
                    let TypedHeader(Authorization(x_matrix)) = parts
                        .extract::<TypedHeader<Authorization<XMatrix>>>()
                        .await
                        .map_err(|e| {
                            warn!("Missing or invalid Authorization header: {}", e);

                            let msg = match e.reason() {
                                TypedHeaderRejectionReason::Missing => {
                                    "Missing Authorization header."
                                }
                                TypedHeaderRejectionReason::Error(_) => {
                                    "Invalid X-Matrix signatures."
                                }
                                _ => "Unknown header-related error",
                            };

                            Error::BadRequest(ErrorKind::forbidden(), msg)
                        })?;

                    if let Some(dest) = x_matrix.destination {
                        if dest != services().globals.server_name() {
                            return Err(Error::BadRequest(
                                ErrorKind::Unauthorized,
                                "X-Matrix destination field does not match server name.",
                            ));
                        }
                    };

                    let origin_signatures = BTreeMap::from_iter([(
                        x_matrix.key.clone(),
                        CanonicalJsonValue::String(x_matrix.sig.to_string()),
                    )]);

                    let signatures = BTreeMap::from_iter([(
                        x_matrix.origin.as_str().to_owned(),
                        CanonicalJsonValue::Object(
                            origin_signatures
                                .into_iter()
                                .map(|(k, v)| (k.to_string(), v))
                                .collect(),
                        ),
                    )]);

                    // Canonical JSON object whose signature is being verified:
                    // method, uri, origin, destination, signatures (+ content).
                    let mut request_map = BTreeMap::from_iter([
                        (
                            "method".to_owned(),
                            CanonicalJsonValue::String(parts.method.to_string()),
                        ),
                        (
                            "uri".to_owned(),
                            CanonicalJsonValue::String(parts.uri.to_string()),
                        ),
                        (
                            "origin".to_owned(),
                            CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()),
                        ),
                        (
                            "destination".to_owned(),
                            CanonicalJsonValue::String(
                                services().globals.server_name().as_str().to_owned(),
                            ),
                        ),
                        (
                            "signatures".to_owned(),
                            CanonicalJsonValue::Object(signatures),
                        ),
                    ]);

                    if let Some(json_body) = &json_body {
                        request_map.insert("content".to_owned(), json_body.clone());
                    };

                    let keys_result = services()
                        .rooms
                        .event_handler
                        .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false)
                        .await;

                    let keys = match keys_result {
                        Ok(b) => b,
                        Err(e) => {
                            warn!("Failed to fetch signing keys: {}", e);
                            return Err(Error::BadRequest(
                                ErrorKind::forbidden(),
                                "Failed to fetch signing keys.",
                            ));
                        }
                    };

                    // Only verify_keys that are currently valid should be used for validating requests
                    // as per MSC4029
                    let pub_key_map = BTreeMap::from_iter([(
                        x_matrix.origin.as_str().to_owned(),
                        if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
                            keys.verify_keys
                                .into_iter()
                                .map(|(id, key)| (id, key.key))
                                .collect()
                        } else {
                            BTreeMap::new()
                        },
                    )]);

                    match ruma::signatures::verify_json(&pub_key_map, &request_map) {
                        Ok(()) => (None, None, Some(x_matrix.origin), None),
                        Err(e) => {
                            warn!(
                                "Failed to verify json request from {}: {}\n{:?}",
                                x_matrix.origin, e, request_map
                            );

                            if parts.uri.to_string().contains('@') {
                                warn!(
                                    "Request uri contained '@' character. Make sure your \
                                    reverse proxy gives Conduit the raw uri (apache: use \
                                    nocanon)"
                                );
                            }

                            return Err(Error::BadRequest(
                                ErrorKind::forbidden(),
                                "Failed to verify X-Matrix signatures.",
                            ));
                        }
                    }
                }
                (
                    AuthScheme::None
                    | AuthScheme::AppserviceToken
                    | AuthScheme::AccessTokenOptional,
                    Token::None,
                ) => (None, None, None, None),
                (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => {
                    return Err(Error::BadRequest(
                        ErrorKind::Unauthorized,
                        "Only server signatures should be used on this endpoint.",
                    ));
                }
                (AuthScheme::AppserviceToken, Token::User(_)) => {
                    return Err(Error::BadRequest(
                        ErrorKind::Unauthorized,
                        "Only appservice access tokens should be used on this endpoint.",
                    ));
                }
            };

        let mut http_request = Request::builder().uri(parts.uri).method(parts.method);
        *http_request.headers_mut().unwrap() = parts.headers;

        if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
            let user_id = sender_user.clone().unwrap_or_else(|| {
                UserId::parse_with_server_name("", services().globals.server_name())
                    .expect("we know this is valid")
            });

            // For UIAA continuations, look up the originally stored request by
            // its auth.session id...
            let uiaa_request = json_body
                .get("auth")
                .and_then(|auth| auth.as_object())
                .and_then(|auth| auth.get("session"))
                .and_then(|session| session.as_str())
                .and_then(|session| {
                    services().uiaa.get_uiaa_request(
                        &user_id,
                        &sender_device.clone().unwrap_or_else(|| "".into()),
                        session,
                    )
                });

            // ...and merge its fields back in, without overwriting anything the
            // client resent.
            if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request {
                for (key, value) in initial_request {
                    json_body.entry(key).or_insert(value);
                }
            }

            // Re-serialize the (possibly merged) JSON as the effective body.
            let mut buf = BytesMut::new().writer();
            serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail");
            body = buf.into_inner().freeze();
        }

        let http_request = http_request.body(&*body).unwrap();

        debug!("{:?}", http_request);

        let body = T::try_from_http_request(http_request, &path_params).map_err(|e| {
            warn!("try_from_http_request failed: {:?}", e);
            debug!("JSON body: {:?}", json_body);
            Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.")
        })?;

        Ok(Ruma {
            body,
            sender_user,
            sender_device,
            sender_servername,
            appservice_info,
            json_body,
        })
    }
}
|
|
||||||
|
|
||||||
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
|
|
||||||
fn into_response(self) -> Response {
|
|
||||||
match self.0.try_into_http_response::<BytesMut>() {
|
|
||||||
Ok(res) => res.map(BytesMut::freeze).map(Body::from).into_response(),
|
|
||||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,43 +0,0 @@
|
||||||
use crate::{service::appservice::RegistrationInfo, Error};
|
|
||||||
use ruma::{
|
|
||||||
api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName,
|
|
||||||
OwnedUserId,
|
|
||||||
};
|
|
||||||
use std::ops::Deref;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
mod axum;
|
|
||||||
|
|
||||||
/// Extractor for Ruma request structs
pub struct Ruma<T> {
    /// The deserialized request body.
    pub body: T,
    /// Authenticated user, when the request carried a valid user token.
    pub sender_user: Option<OwnedUserId>,
    /// Device belonging to `sender_user`, when known.
    pub sender_device: Option<OwnedDeviceId>,
    /// Origin server, when the request was authenticated via server signatures.
    pub sender_servername: Option<OwnedServerName>,
    // This is None when body is not a valid string
    pub json_body: Option<CanonicalJsonValue>,
    /// Appservice registration, when the request used an appservice token.
    pub appservice_info: Option<RegistrationInfo>,
}
|
|
||||||
|
|
||||||
// Allow handlers to use `body.field` directly instead of `body.body.field`.
impl<T> Deref for Ruma<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.body
    }
}
|
|
||||||
|
|
||||||
/// Newtype wrapper so ruma response types can implement axum's `IntoResponse`.
#[derive(Clone)]
pub struct RumaResponse<T>(pub T);

impl<T> From<T> for RumaResponse<T> {
    fn from(t: T) -> Self {
        Self(t)
    }
}

// Lets `?` convert internal errors into a UIAA-compatible HTTP response.
impl From<Error> for RumaResponse<UiaaResponse> {
    fn from(t: Error) -> Self {
        t.to_response()
    }
}
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,37 +1,27 @@
|
||||||
use crate::{services, utils, Error, Result};
|
use crate::{utils, Error, Result};
|
||||||
use bytes::BytesMut;
|
use bytes::BytesMut;
|
||||||
use ruma::api::{
|
use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken};
|
||||||
appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken,
|
use std::{
|
||||||
|
convert::{TryFrom, TryInto},
|
||||||
|
fmt::Debug,
|
||||||
|
mem,
|
||||||
|
time::Duration,
|
||||||
};
|
};
|
||||||
use std::{fmt::Debug, mem, time::Duration};
|
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
/// Sends a request to an appservice
|
pub async fn send_request<T: OutgoingRequest>(
|
||||||
///
|
globals: &crate::database::globals::Globals,
|
||||||
/// Only returns None if there is no url specified in the appservice registration file
|
registration: serde_yaml::Value,
|
||||||
#[tracing::instrument(skip(request))]
|
|
||||||
pub(crate) async fn send_request<T>(
|
|
||||||
registration: Registration,
|
|
||||||
request: T,
|
request: T,
|
||||||
) -> Result<Option<T::IncomingResponse>>
|
) -> Result<T::IncomingResponse>
|
||||||
where
|
where
|
||||||
T: OutgoingRequest + Debug,
|
T: Debug,
|
||||||
{
|
{
|
||||||
let destination = match registration.url {
|
let destination = registration.get("url").unwrap().as_str().unwrap();
|
||||||
Some(url) => url,
|
let hs_token = registration.get("hs_token").unwrap().as_str().unwrap();
|
||||||
None => {
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let hs_token = registration.hs_token.as_str();
|
|
||||||
|
|
||||||
let mut http_request = request
|
let mut http_request = request
|
||||||
.try_into_http_request::<BytesMut>(
|
.try_into_http_request::<BytesMut>(&destination, SendAccessToken::IfRequired(""))
|
||||||
&destination,
|
|
||||||
SendAccessToken::IfRequired(hs_token),
|
|
||||||
&[MatrixVersion::V1_0],
|
|
||||||
)
|
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.map(|body| body.freeze());
|
.map(|body| body.freeze());
|
||||||
|
|
||||||
|
@ -50,26 +40,13 @@ where
|
||||||
);
|
);
|
||||||
*http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");
|
*http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");
|
||||||
|
|
||||||
let mut reqwest_request = reqwest::Request::try_from(http_request)?;
|
let mut reqwest_request = reqwest::Request::try_from(http_request)
|
||||||
|
.expect("all http requests are valid reqwest requests");
|
||||||
|
|
||||||
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
|
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
|
||||||
|
|
||||||
let url = reqwest_request.url().clone();
|
let url = reqwest_request.url().clone();
|
||||||
let mut response = match services()
|
let mut response = globals.reqwest_client().execute(reqwest_request).await?;
|
||||||
.globals
|
|
||||||
.default_client()
|
|
||||||
.execute(reqwest_request)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(r) => r,
|
|
||||||
Err(e) => {
|
|
||||||
warn!(
|
|
||||||
"Could not send request to appservice {:?} at {}: {}",
|
|
||||||
registration.id, destination, e
|
|
||||||
);
|
|
||||||
return Err(e.into());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// reqwest::Response -> http::Response conversion
|
// reqwest::Response -> http::Response conversion
|
||||||
let status = response.status();
|
let status = response.status();
|
||||||
|
@ -103,8 +80,7 @@ where
|
||||||
.body(body)
|
.body(body)
|
||||||
.expect("reqwest body is valid http body"),
|
.expect("reqwest body is valid http body"),
|
||||||
);
|
);
|
||||||
|
response.map_err(|_| {
|
||||||
response.map(Some).map_err(|_| {
|
|
||||||
warn!(
|
warn!(
|
||||||
"Appservice returned invalid response bytes {}\n{}",
|
"Appservice returned invalid response bytes {}\n{}",
|
||||||
destination, url
|
destination, url
|
27
src/clap.rs
27
src/clap.rs
|
@ -1,27 +0,0 @@
|
||||||
//! Integration with `clap`
|
|
||||||
|
|
||||||
use clap::Parser;
|
|
||||||
|
|
||||||
/// Returns the current version of the crate with extra info if supplied
|
|
||||||
///
|
|
||||||
/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to
|
|
||||||
/// include it in parenthesis after the SemVer version. A common value are git
|
|
||||||
/// commit hashes.
|
|
||||||
fn version() -> String {
|
|
||||||
let cargo_pkg_version = env!("CARGO_PKG_VERSION");
|
|
||||||
|
|
||||||
match option_env!("CONDUIT_VERSION_EXTRA") {
|
|
||||||
Some(x) => format!("{} ({})", cargo_pkg_version, x),
|
|
||||||
None => cargo_pkg_version.to_owned(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Command line arguments
|
|
||||||
#[derive(Parser)]
|
|
||||||
#[clap(about, version = version())]
|
|
||||||
pub struct Args {}
|
|
||||||
|
|
||||||
/// Parse command line arguments into structured data
|
|
||||||
pub fn parse() -> Args {
|
|
||||||
Args::parse()
|
|
||||||
}
|
|
743
src/client_server/account.rs
Normal file
743
src/client_server/account.rs
Normal file
|
@ -0,0 +1,743 @@
|
||||||
|
use std::{
|
||||||
|
collections::BTreeMap,
|
||||||
|
convert::{TryFrom, TryInto},
|
||||||
|
sync::Arc,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
|
||||||
|
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::{
|
||||||
|
account::{
|
||||||
|
change_password, deactivate, get_username_availability, register, whoami,
|
||||||
|
ThirdPartyIdRemovalStatus,
|
||||||
|
},
|
||||||
|
contact::get_contacts,
|
||||||
|
uiaa::{AuthFlow, UiaaInfo},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
events::{
|
||||||
|
room::{
|
||||||
|
canonical_alias, guest_access, history_visibility, join_rules, member, message, name,
|
||||||
|
topic,
|
||||||
|
},
|
||||||
|
EventType,
|
||||||
|
},
|
||||||
|
identifiers::RoomName,
|
||||||
|
push, RoomAliasId, RoomId, RoomVersionId, UserId,
|
||||||
|
};
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
use register::RegistrationKind;
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, post};
|
||||||
|
|
||||||
|
const GUEST_NAME_LENGTH: usize = 10;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/register/available`
|
||||||
|
///
|
||||||
|
/// Checks if a username is valid and available on this server.
|
||||||
|
///
|
||||||
|
/// - Returns true if no user or appservice on this server claimed this username
|
||||||
|
/// - This will not reserve the username, so the username might become invalid when trying to register
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/register/available", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_register_available_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_username_availability::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_username_availability::Response> {
|
||||||
|
// Validate user id
|
||||||
|
let user_id = UserId::parse_with_server_name(body.username.clone(), db.globals.server_name())
|
||||||
|
.ok()
|
||||||
|
.filter(|user_id| {
|
||||||
|
!user_id.is_historical() && user_id.server_name() == db.globals.server_name()
|
||||||
|
})
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidUsername,
|
||||||
|
"Username is invalid.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
// Check if username is creative enough
|
||||||
|
if db.users.exists(&user_id)? {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::UserInUse,
|
||||||
|
"Desired user ID is already taken.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO add check for appservice namespaces
|
||||||
|
|
||||||
|
// If no if check is true we have an username that's available to be used.
|
||||||
|
Ok(get_username_availability::Response { available: true }.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/register`
|
||||||
|
///
|
||||||
|
/// Register an account on this homeserver.
|
||||||
|
///
|
||||||
|
/// - Returns the device id and access_token unless `inhibit_login` is true
|
||||||
|
/// - When registering a guest account, all parameters except initial_device_display_name will be
|
||||||
|
/// ignored
|
||||||
|
/// - Creates a new account and a device for it
|
||||||
|
/// - The account will be populated with default account data
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/register", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn register_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<register::Request<'_>>,
|
||||||
|
) -> ConduitResult<register::Response> {
|
||||||
|
if !db.globals.allow_registration() && !body.from_appservice {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"Registration has been disabled.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let is_guest = body.kind == RegistrationKind::Guest;
|
||||||
|
|
||||||
|
let mut missing_username = false;
|
||||||
|
|
||||||
|
// Validate user id
|
||||||
|
let user_id = UserId::parse_with_server_name(
|
||||||
|
if is_guest {
|
||||||
|
utils::random_string(GUEST_NAME_LENGTH)
|
||||||
|
} else {
|
||||||
|
body.username.clone().unwrap_or_else(|| {
|
||||||
|
// If the user didn't send a username field, that means the client is just trying
|
||||||
|
// the get an UIAA error to see available flows
|
||||||
|
missing_username = true;
|
||||||
|
// Just give the user a random name. He won't be able to register with it anyway.
|
||||||
|
utils::random_string(GUEST_NAME_LENGTH)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
.to_lowercase(),
|
||||||
|
db.globals.server_name(),
|
||||||
|
)
|
||||||
|
.ok()
|
||||||
|
.filter(|user_id| !user_id.is_historical() && user_id.server_name() == db.globals.server_name())
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidUsername,
|
||||||
|
"Username is invalid.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
// Check if username is creative enough
|
||||||
|
if !missing_username && db.users.exists(&user_id)? {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::UserInUse,
|
||||||
|
"Desired user ID is already taken.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
// UIAA
|
||||||
|
let mut uiaainfo = UiaaInfo {
|
||||||
|
flows: vec![AuthFlow {
|
||||||
|
stages: vec!["m.login.dummy".to_owned()],
|
||||||
|
}],
|
||||||
|
completed: Vec::new(),
|
||||||
|
params: Default::default(),
|
||||||
|
session: None,
|
||||||
|
auth_error: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
if !body.from_appservice {
|
||||||
|
if let Some(auth) = &body.auth {
|
||||||
|
let (worked, uiaainfo) = db.uiaa.try_auth(
|
||||||
|
&UserId::parse_with_server_name("", db.globals.server_name())
|
||||||
|
.expect("we know this is valid"),
|
||||||
|
"".into(),
|
||||||
|
auth,
|
||||||
|
&uiaainfo,
|
||||||
|
&db.users,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
if !worked {
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
}
|
||||||
|
// Success!
|
||||||
|
} else if let Some(json) = body.json_body {
|
||||||
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
|
db.uiaa.create(
|
||||||
|
&UserId::parse_with_server_name("", db.globals.server_name())
|
||||||
|
.expect("we know this is valid"),
|
||||||
|
"".into(),
|
||||||
|
&uiaainfo,
|
||||||
|
&json,
|
||||||
|
)?;
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
} else {
|
||||||
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if missing_username {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::MissingParam,
|
||||||
|
"Missing username field.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let password = if is_guest {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
body.password.as_deref()
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create user
|
||||||
|
db.users.create(&user_id, password)?;
|
||||||
|
|
||||||
|
let displayname = format!("{} ⚡️", user_id.localpart());
|
||||||
|
|
||||||
|
db.users
|
||||||
|
.set_displayname(&user_id, Some(displayname.clone()))?;
|
||||||
|
|
||||||
|
// Initial data
|
||||||
|
db.account_data.update(
|
||||||
|
None,
|
||||||
|
&user_id,
|
||||||
|
EventType::PushRules,
|
||||||
|
&ruma::events::push_rules::PushRulesEvent {
|
||||||
|
content: ruma::events::push_rules::PushRulesEventContent {
|
||||||
|
global: push::Ruleset::server_default(&user_id),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
if !is_guest && body.inhibit_login {
|
||||||
|
return Ok(register::Response {
|
||||||
|
access_token: None,
|
||||||
|
user_id,
|
||||||
|
device_id: None,
|
||||||
|
}
|
||||||
|
.into());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate new device id if the user didn't specify one
|
||||||
|
let device_id = if is_guest {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
body.device_id.clone()
|
||||||
|
}
|
||||||
|
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
|
||||||
|
|
||||||
|
// Generate new token for the device
|
||||||
|
let token = utils::random_string(TOKEN_LENGTH);
|
||||||
|
|
||||||
|
// Add device
|
||||||
|
db.users.create_device(
|
||||||
|
&user_id,
|
||||||
|
&device_id,
|
||||||
|
&token,
|
||||||
|
body.initial_device_display_name.clone(),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// If this is the first user on this server, create the admins room
|
||||||
|
if db.users.count()? == 1 {
|
||||||
|
// Create a user for the server
|
||||||
|
let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name())
|
||||||
|
.expect("@conduit:server_name is valid");
|
||||||
|
|
||||||
|
db.users.create(&conduit_user, None)?;
|
||||||
|
|
||||||
|
let room_id = RoomId::new(db.globals.server_name());
|
||||||
|
|
||||||
|
db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?;
|
||||||
|
|
||||||
|
let mutex_state = Arc::clone(
|
||||||
|
db.globals
|
||||||
|
.roomid_mutex_state
|
||||||
|
.write()
|
||||||
|
.unwrap()
|
||||||
|
.entry(room_id.clone())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
|
let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone());
|
||||||
|
content.federate = true;
|
||||||
|
content.predecessor = None;
|
||||||
|
content.room_version = RoomVersionId::Version6;
|
||||||
|
|
||||||
|
// 1. The room create event
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomCreate,
|
||||||
|
content: serde_json::to_value(content).expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some("".to_owned()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// 2. Make conduit bot join
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomMember,
|
||||||
|
content: serde_json::to_value(member::MemberEventContent {
|
||||||
|
membership: member::MembershipState::Join,
|
||||||
|
displayname: None,
|
||||||
|
avatar_url: None,
|
||||||
|
is_direct: None,
|
||||||
|
third_party_invite: None,
|
||||||
|
blurhash: None,
|
||||||
|
reason: None,
|
||||||
|
})
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(conduit_user.to_string()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// 3. Power levels
|
||||||
|
let mut users = BTreeMap::new();
|
||||||
|
users.insert(conduit_user.clone(), 100.into());
|
||||||
|
users.insert(user_id.clone(), 100.into());
|
||||||
|
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomPowerLevels,
|
||||||
|
content: serde_json::to_value(
|
||||||
|
ruma::events::room::power_levels::PowerLevelsEventContent {
|
||||||
|
users,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some("".to_owned()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// 4.1 Join Rules
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomJoinRules,
|
||||||
|
content: serde_json::to_value(join_rules::JoinRulesEventContent::new(
|
||||||
|
join_rules::JoinRule::Invite,
|
||||||
|
))
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some("".to_owned()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// 4.2 History Visibility
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomHistoryVisibility,
|
||||||
|
content: serde_json::to_value(
|
||||||
|
history_visibility::HistoryVisibilityEventContent::new(
|
||||||
|
history_visibility::HistoryVisibility::Shared,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some("".to_owned()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// 4.3 Guest Access
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomGuestAccess,
|
||||||
|
content: serde_json::to_value(guest_access::GuestAccessEventContent::new(
|
||||||
|
guest_access::GuestAccess::Forbidden,
|
||||||
|
))
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some("".to_owned()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// 6. Events implied by name and topic
|
||||||
|
let room_name =
|
||||||
|
Box::<RoomName>::try_from(format!("{} Admin Room", db.globals.server_name()))
|
||||||
|
.expect("Room name is valid");
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomName,
|
||||||
|
content: serde_json::to_value(name::NameEventContent::new(Some(room_name)))
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some("".to_owned()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomTopic,
|
||||||
|
content: serde_json::to_value(topic::TopicEventContent {
|
||||||
|
topic: format!("Manage {}", db.globals.server_name()),
|
||||||
|
})
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some("".to_owned()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// Room alias
|
||||||
|
let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name())
|
||||||
|
.try_into()
|
||||||
|
.expect("#admins:server_name is a valid alias name");
|
||||||
|
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomCanonicalAlias,
|
||||||
|
content: serde_json::to_value(canonical_alias::CanonicalAliasEventContent {
|
||||||
|
alias: Some(alias.clone()),
|
||||||
|
alt_aliases: Vec::new(),
|
||||||
|
})
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some("".to_owned()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?;
|
||||||
|
|
||||||
|
// Invite and join the real user
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomMember,
|
||||||
|
content: serde_json::to_value(member::MemberEventContent {
|
||||||
|
membership: member::MembershipState::Invite,
|
||||||
|
displayname: None,
|
||||||
|
avatar_url: None,
|
||||||
|
is_direct: None,
|
||||||
|
third_party_invite: None,
|
||||||
|
blurhash: None,
|
||||||
|
reason: None,
|
||||||
|
})
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(user_id.to_string()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomMember,
|
||||||
|
content: serde_json::to_value(member::MemberEventContent {
|
||||||
|
membership: member::MembershipState::Join,
|
||||||
|
displayname: Some(displayname),
|
||||||
|
avatar_url: None,
|
||||||
|
is_direct: None,
|
||||||
|
third_party_invite: None,
|
||||||
|
blurhash: None,
|
||||||
|
reason: None,
|
||||||
|
})
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(user_id.to_string()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&user_id,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// Send welcome message
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomMessage,
|
||||||
|
content: serde_json::to_value(message::MessageEventContent::text_html(
|
||||||
|
"Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(),
|
||||||
|
"Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing <code>/join #conduit:matrix.org</code>. <strong>Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.</strong> Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(),
|
||||||
|
))
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: None,
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&conduit_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("{} registered on this server", user_id);
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(register::Response {
|
||||||
|
access_token: Some(token),
|
||||||
|
user_id,
|
||||||
|
device_id: Some(device_id),
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/account/password`
|
||||||
|
///
|
||||||
|
/// Changes the password of this account.
|
||||||
|
///
|
||||||
|
/// - Invalidates all other access tokens if logout_devices is true
|
||||||
|
/// - Deletes all other devices and most of their data (to-device events, last seen, etc.) if
|
||||||
|
/// logout_devices is true
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/account/password", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn change_password_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<change_password::Request<'_>>,
|
||||||
|
) -> ConduitResult<change_password::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let mut uiaainfo = UiaaInfo {
|
||||||
|
flows: vec![AuthFlow {
|
||||||
|
stages: vec!["m.login.password".to_owned()],
|
||||||
|
}],
|
||||||
|
completed: Vec::new(),
|
||||||
|
params: Default::default(),
|
||||||
|
session: None,
|
||||||
|
auth_error: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(auth) = &body.auth {
|
||||||
|
let (worked, uiaainfo) = db.uiaa.try_auth(
|
||||||
|
&sender_user,
|
||||||
|
sender_device,
|
||||||
|
auth,
|
||||||
|
&uiaainfo,
|
||||||
|
&db.users,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
if !worked {
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
}
|
||||||
|
// Success!
|
||||||
|
} else if let Some(json) = body.json_body {
|
||||||
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
|
db.uiaa
|
||||||
|
.create(&sender_user, &sender_device, &uiaainfo, &json)?;
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
} else {
|
||||||
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
|
}
|
||||||
|
|
||||||
|
db.users
|
||||||
|
.set_password(&sender_user, Some(&body.new_password))?;
|
||||||
|
|
||||||
|
if body.logout_devices {
|
||||||
|
// Logout all devices except the current one
|
||||||
|
for id in db
|
||||||
|
.users
|
||||||
|
.all_device_ids(&sender_user)
|
||||||
|
.filter_map(|id| id.ok())
|
||||||
|
.filter(|id| id != sender_device)
|
||||||
|
{
|
||||||
|
db.users.remove_device(&sender_user, &id)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(change_password::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET _matrix/client/r0/account/whoami`
|
||||||
|
///
|
||||||
|
/// Get user_id of this account.
|
||||||
|
///
|
||||||
|
/// - Also works for Application Services
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/account/whoami", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(body))]
|
||||||
|
pub async fn whoami_route(body: Ruma<whoami::Request>) -> ConduitResult<whoami::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
Ok(whoami::Response {
|
||||||
|
user_id: sender_user.clone(),
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/account/deactivate`
|
||||||
|
///
|
||||||
|
/// Deactivate this user's account
|
||||||
|
///
|
||||||
|
/// - Leaves all rooms and rejects all invitations
|
||||||
|
/// - Invalidates all access tokens
|
||||||
|
/// - Deletes all devices
|
||||||
|
/// - Removes ability to log in again
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/account/deactivate", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn deactivate_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<deactivate::Request<'_>>,
|
||||||
|
) -> ConduitResult<deactivate::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let mut uiaainfo = UiaaInfo {
|
||||||
|
flows: vec![AuthFlow {
|
||||||
|
stages: vec!["m.login.password".to_owned()],
|
||||||
|
}],
|
||||||
|
completed: Vec::new(),
|
||||||
|
params: Default::default(),
|
||||||
|
session: None,
|
||||||
|
auth_error: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(auth) = &body.auth {
|
||||||
|
let (worked, uiaainfo) = db.uiaa.try_auth(
|
||||||
|
&sender_user,
|
||||||
|
&sender_device,
|
||||||
|
auth,
|
||||||
|
&uiaainfo,
|
||||||
|
&db.users,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
if !worked {
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
}
|
||||||
|
// Success!
|
||||||
|
} else if let Some(json) = body.json_body {
|
||||||
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
|
db.uiaa
|
||||||
|
.create(&sender_user, &sender_device, &uiaainfo, &json)?;
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
} else {
|
||||||
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Leave all joined rooms and reject all invitations
|
||||||
|
let all_rooms = db
|
||||||
|
.rooms
|
||||||
|
.rooms_joined(&sender_user)
|
||||||
|
.chain(
|
||||||
|
db.rooms
|
||||||
|
.rooms_invited(&sender_user)
|
||||||
|
.map(|t| t.map(|(r, _)| r)),
|
||||||
|
)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
for room_id in all_rooms {
|
||||||
|
let room_id = room_id?;
|
||||||
|
let event = member::MemberEventContent {
|
||||||
|
membership: member::MembershipState::Leave,
|
||||||
|
displayname: None,
|
||||||
|
avatar_url: None,
|
||||||
|
is_direct: None,
|
||||||
|
third_party_invite: None,
|
||||||
|
blurhash: None,
|
||||||
|
reason: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mutex_state = Arc::clone(
|
||||||
|
db.globals
|
||||||
|
.roomid_mutex_state
|
||||||
|
.write()
|
||||||
|
.unwrap()
|
||||||
|
.entry(room_id.clone())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
|
db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type: EventType::RoomMember,
|
||||||
|
content: serde_json::to_value(event).expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(sender_user.to_string()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
&sender_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove devices and mark account as deactivated
|
||||||
|
db.users.deactivate_account(&sender_user)?;
|
||||||
|
|
||||||
|
info!("{} deactivated their account", sender_user);
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(deactivate::Response {
|
||||||
|
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET _matrix/client/r0/account/3pid`
|
||||||
|
///
|
||||||
|
/// Get a list of third party identifiers associated with this account.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/account/3pid", data = "<body>")
|
||||||
|
)]
|
||||||
|
pub async fn third_party_route(
|
||||||
|
body: Ruma<get_contacts::Request>,
|
||||||
|
) -> ConduitResult<get_contacts::Response> {
|
||||||
|
let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
Ok(get_contacts::Response::new(Vec::new()).into())
|
||||||
|
}
|
133
src/client_server/alias.rs
Normal file
133
src/client_server/alias.rs
Normal file
|
@ -0,0 +1,133 @@
|
||||||
|
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Ruma};
|
||||||
|
use regex::Regex;
|
||||||
|
use ruma::{
|
||||||
|
api::{
|
||||||
|
appservice,
|
||||||
|
client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::alias::{create_alias, delete_alias, get_alias},
|
||||||
|
},
|
||||||
|
federation,
|
||||||
|
},
|
||||||
|
RoomAliasId,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{delete, get, put};
|
||||||
|
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/directory/room/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn create_alias_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<create_alias::Request<'_>>,
|
||||||
|
) -> ConduitResult<create_alias::Response> {
|
||||||
|
if db.rooms.id_from_alias(&body.room_alias)?.is_some() {
|
||||||
|
return Err(Error::Conflict("Alias already exists."));
|
||||||
|
}
|
||||||
|
|
||||||
|
db.rooms
|
||||||
|
.set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(create_alias::Response::new().into())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
delete("/_matrix/client/r0/directory/room/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn delete_alias_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<delete_alias::Request<'_>>,
|
||||||
|
) -> ConduitResult<delete_alias::Response> {
|
||||||
|
db.rooms.set_alias(&body.room_alias, None, &db.globals)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_alias::Response::new().into())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/directory/room/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_alias_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_alias::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_alias::Response> {
|
||||||
|
get_alias_helper(&db, &body.room_alias).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_alias_helper(
|
||||||
|
db: &Database,
|
||||||
|
room_alias: &RoomAliasId,
|
||||||
|
) -> ConduitResult<get_alias::Response> {
|
||||||
|
if room_alias.server_name() != db.globals.server_name() {
|
||||||
|
let response = db
|
||||||
|
.sending
|
||||||
|
.send_federation_request(
|
||||||
|
&db.globals,
|
||||||
|
room_alias.server_name(),
|
||||||
|
federation::query::get_room_information::v1::Request { room_alias },
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
return Ok(get_alias::Response::new(response.room_id, response.servers).into());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut room_id = None;
|
||||||
|
match db.rooms.id_from_alias(&room_alias)? {
|
||||||
|
Some(r) => room_id = Some(r),
|
||||||
|
None => {
|
||||||
|
for (_id, registration) in db.appservice.all()? {
|
||||||
|
let aliases = registration
|
||||||
|
.get("namespaces")
|
||||||
|
.and_then(|ns| ns.get("aliases"))
|
||||||
|
.and_then(|aliases| aliases.as_sequence())
|
||||||
|
.map_or_else(Vec::new, |aliases| {
|
||||||
|
aliases
|
||||||
|
.iter()
|
||||||
|
.filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok())
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
});
|
||||||
|
|
||||||
|
if aliases
|
||||||
|
.iter()
|
||||||
|
.any(|aliases| aliases.is_match(room_alias.as_str()))
|
||||||
|
&& db
|
||||||
|
.sending
|
||||||
|
.send_appservice_request(
|
||||||
|
&db.globals,
|
||||||
|
registration,
|
||||||
|
appservice::query::query_room_alias::v1::Request { room_alias },
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.is_ok()
|
||||||
|
{
|
||||||
|
room_id = Some(db.rooms.id_from_alias(&room_alias)?.ok_or_else(|| {
|
||||||
|
Error::bad_config("Appservice lied to us. Room does not exist.")
|
||||||
|
})?);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let room_id = match room_id {
|
||||||
|
Some(room_id) => room_id,
|
||||||
|
None => {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Room with alias not found.",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(get_alias::Response::new(room_id, vec![db.globals.server_name().to_owned()]).into())
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue